repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
OpenSourcePolicyCenter/dynamic | ogusa/demographics.py | 1 | 23664 | '''
------------------------------------------------------------------------
Functions for generating demographic objects necessary for the OG-USA
model
------------------------------------------------------------------------
'''
# Import packages
import os
import numpy as np
import scipy.optimize as opt
import scipy.interpolate as si
import pandas as pd
from ogusa import parameter_plots as pp
# create output directory for figures
CUR_PATH = os.path.split(os.path.abspath(__file__))[0]
OUTPUT_DIR = os.path.join(CUR_PATH, 'OUTPUT', 'Demographics')
if os.access(OUTPUT_DIR, os.F_OK) is False:
os.makedirs(OUTPUT_DIR)
'''
------------------------------------------------------------------------
Define functions
------------------------------------------------------------------------
'''
def get_fert(totpers, min_yr, max_yr, graph=False):
'''
This function generates a vector of fertility rates by model period
age that corresponds to the fertility rate data by age in years
(Source: National Vital Statistics Reports, Volume 64, Number 1,
January 15, 2015, Table 3, final 2013 data
http://www.cdc.gov/nchs/data/nvsr/nvsr64/nvsr64_01.pdf)
Args:
totpers (int): total number of agent life periods (E+S), >= 3
min_yr (int): age in years at which agents are born, >= 0
max_yr (int): age in years at which agents die with certainty,
>= 4
graph (bool): =True if want graphical output
Returns:
fert_rates (Numpy array): fertility rates for each model period
of life
'''
# Get current population data (2013) for weighting
pop_file = os.path.join(
CUR_PATH, 'data', 'demographic', 'pop_data.csv')
pop_data = pd.read_csv(pop_file, thousands=',')
pop_data_samp = pop_data[(pop_data['Age'] >= min_yr - 1) &
(pop_data['Age'] <= max_yr - 1)]
curr_pop = np.array(pop_data_samp['2013'], dtype='f')
curr_pop_pct = curr_pop / curr_pop.sum()
# Get fertility rate by age-bin data
fert_data = (np.array([0.0, 0.0, 0.3, 12.3, 47.1, 80.7, 105.5, 98.0,
49.3, 10.4, 0.8, 0.0, 0.0]) / 2000)
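    # Note (interpretive comment, added for clarity): the NVSS rates are
    # births per 1,000 women in each age bin; dividing by 2,000 converts
    # them to births per person in a model without gender, under the
    # assumption that half the population in each bin is female.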
# Mid points of age bins
age_midp = np.array([9, 10, 12, 16, 18.5, 22, 27, 32, 37, 42, 47,
55, 56])
# Generate interpolation functions for fertility rates
fert_func = si.interp1d(age_midp, fert_data, kind='cubic')
# Calculate average fertility rate in each age bin using trapezoid
# method with a large number of points in each bin.
binsize = (max_yr - min_yr + 1) / totpers
num_sub_bins = float(10000)
len_subbins = (np.float64(100 * num_sub_bins)) / totpers
age_sub = (np.linspace(np.float64(binsize) / num_sub_bins,
np.float64(max_yr),
int(num_sub_bins*max_yr)) - 0.5 *
np.float64(binsize) / num_sub_bins)
curr_pop_sub = np.repeat(np.float64(curr_pop_pct) / num_sub_bins,
num_sub_bins)
fert_rates_sub = np.zeros(curr_pop_sub.shape)
pred_ind = (age_sub > age_midp[0]) * (age_sub < age_midp[-1])
age_pred = age_sub[pred_ind]
fert_rates_sub[pred_ind] = np.float64(fert_func(age_pred))
fert_rates = np.zeros(totpers)
end_sub_bin = 0
for i in range(totpers):
beg_sub_bin = int(end_sub_bin)
end_sub_bin = int(np.rint((i + 1) * len_subbins))
fert_rates[i] = ((
curr_pop_sub[beg_sub_bin:end_sub_bin] *
fert_rates_sub[beg_sub_bin:end_sub_bin]).sum() /
curr_pop_sub[beg_sub_bin:end_sub_bin].sum())
if graph:
pp.plot_fert_rates(fert_func, age_midp, totpers, min_yr, max_yr,
fert_data, fert_rates, output_dir=OUTPUT_DIR)
return fert_rates
def get_mort(totpers, min_yr, max_yr, graph=False):
'''
This function generates a vector of mortality rates by model period
age.
(Source: Male and Female death probabilities Actuarial Life table,
2011 Social Security Administration,
http://www.ssa.gov/oact/STATS/table4c6.html)
Args:
totpers (int): total number of agent life periods (E+S), >= 3
min_yr (int): age in years at which agents are born, >= 0
max_yr (int): age in years at which agents die with certainty,
>= 4
graph (bool): =True if want graphical output
Returns:
        mort_rates (Numpy array): mortality rates that correspond to each
period of life
infmort_rate (scalar): infant mortality rate from 2015 U.S. CIA
World Factbook
'''
# Get mortality rate by age data
infmort_rate = 0.00587 # taken from 2015 U.S. infant mortality rate
mort_file = os.path.join(
CUR_PATH, 'data', 'demographic', 'mort_rates2011.csv')
mort_data = pd.read_csv(mort_file, thousands=',')
age_year_all = mort_data['Age'] + 1
mort_rates_all = (
((mort_data['Male Mort. Rate'] * mort_data['Num. Male Lives']) +
(mort_data['Female Mort. Rate'] *
mort_data['Num. Female Lives'])) /
(mort_data['Num. Male Lives'] + mort_data['Num. Female Lives']))
age_year_all = age_year_all[np.isfinite(mort_rates_all)]
mort_rates_all = mort_rates_all[np.isfinite(mort_rates_all)]
# Calculate implied mortality rates in sub-bins of mort_rates_all.
mort_rates_mxyr = mort_rates_all[0:max_yr]
num_sub_bins = int(100)
len_subbins = ((np.float64((max_yr - min_yr + 1) * num_sub_bins)) /
totpers)
mort_rates_sub = np.zeros(num_sub_bins * max_yr, dtype=float)
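    # Convert each annual mortality rate q into an equivalent per-sub-bin
    # rate 1 - (1 - q) ** (1 / num_sub_bins), so that surviving all
    # num_sub_bins sub-bins of a year reproduces the annual survival
    # probability (1 - q)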
for i in range(max_yr):
mort_rates_sub[i * num_sub_bins:(i + 1) * num_sub_bins] =\
(1 - ((1 - mort_rates_mxyr[i]) ** (1.0 / num_sub_bins)))
mort_rates = np.zeros(totpers)
end_sub_bin = 0
for i in range(totpers):
beg_sub_bin = int(end_sub_bin)
end_sub_bin = int(np.rint((i + 1) * len_subbins))
mort_rates[i] = (
1 - (1 - (mort_rates_sub[beg_sub_bin:end_sub_bin])).prod())
mort_rates[-1] = 1 # Mortality rate in last period is set to 1
if graph:
pp.plot_mort_rates_data(totpers, min_yr, max_yr, age_year_all,
mort_rates_all, infmort_rate,
mort_rates, output_dir=OUTPUT_DIR)
return mort_rates, infmort_rate
def pop_rebin(curr_pop_dist, totpers_new):
'''
For cases in which totpers (E+S) is less than the number of periods
in the population distribution data, this function calculates a new
population distribution vector with totpers (E+S) elements.
Args:
curr_pop_dist (Numpy array): population distribution over N
periods
totpers_new (int): number of periods to which we are
transforming the population distribution, >= 3
Returns:
curr_pop_new (Numpy array): new population distribution over
totpers (E+S) periods that approximates curr_pop_dist
'''
# Number of periods in original data
assert totpers_new >= 3
totpers_orig = len(curr_pop_dist)
if int(totpers_new) == totpers_orig:
curr_pop_new = curr_pop_dist
elif int(totpers_new) < totpers_orig:
num_sub_bins = float(10000)
curr_pop_sub = np.repeat(np.float64(curr_pop_dist) /
num_sub_bins, num_sub_bins)
len_subbins = ((np.float64(totpers_orig*num_sub_bins)) /
totpers_new)
curr_pop_new = np.zeros(totpers_new, dtype=np.float64)
end_sub_bin = 0
for i in range(totpers_new):
beg_sub_bin = int(end_sub_bin)
end_sub_bin = int(np.rint((i + 1) * len_subbins))
curr_pop_new[i] = \
curr_pop_sub[beg_sub_bin:end_sub_bin].sum()
# Return curr_pop_new to single precision float (float32)
# datatype
curr_pop_new = np.float32(curr_pop_new)
return curr_pop_new
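# Illustrative sketch (added for exposition; not part of the original
# module and the input profile is made up): the rebinning above splits
# each original period into equal sub-bins and re-sums contiguous runs of
# sub-bins into the new bins, so the total population is preserved up to
# floating-point rounding.
def _rebin_example():
    pop_by_year = np.linspace(4.0e6, 1.0e6, 100)  # synthetic age profile
    num_sub_bins = 10000
    pop_sub = np.repeat(pop_by_year / num_sub_bins, num_sub_bins)
    len_subbins = (100 * num_sub_bins) / 80
    new_pop = np.zeros(80)
    end = 0
    for i in range(80):
        beg, end = int(end), int(np.rint((i + 1) * len_subbins))
        new_pop[i] = pop_sub[beg:end].sum()
    # Totals match because sub-bins are simply re-summed into new bins
    return np.isclose(new_pop.sum(), pop_by_year.sum())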
def get_imm_resid(totpers, min_yr, max_yr):
'''
Calculate immigration rates by age as a residual given population
levels in different periods, then output average calculated
immigration rate. We have to replace the first mortality rate in
this function in order to adjust the first implied immigration rate
(Source: Population data come from Annual Estimates of the Resident
Population by Single Year of Age and Sex: April 1, 2010 to July 1,
2013 (Both sexes) National Characteristics, Vintage 2013, US Census
Bureau,
http://www.census.gov/popest/data/national/asrh/2013/index.html)
Args:
totpers (int): total number of agent life periods (E+S), >= 3
min_yr (int): age in years at which agents are born, >= 0
max_yr (int): age in years at which agents die with certainty,
>= 4
Returns:
        imm_rates (Numpy array): immigration rates that correspond to
each period of life, length E+S
'''
pop_file = os.path.join(
CUR_PATH, 'data', 'demographic', 'pop_data.csv')
pop_data = pd.read_csv(pop_file, thousands=',')
pop_data_samp = pop_data[(pop_data['Age'] >= min_yr - 1) &
(pop_data['Age'] <= max_yr - 1)]
pop_2010, pop_2011, pop_2012, pop_2013 = (
np.array(pop_data_samp['2010'], dtype='f'),
np.array(pop_data_samp['2011'], dtype='f'),
np.array(pop_data_samp['2012'], dtype='f'),
np.array(pop_data_samp['2013'], dtype='f'))
pop_2010_EpS = pop_rebin(pop_2010, totpers)
pop_2011_EpS = pop_rebin(pop_2011, totpers)
pop_2012_EpS = pop_rebin(pop_2012, totpers)
pop_2013_EpS = pop_rebin(pop_2013, totpers)
# Create three years of estimated immigration rates for youngest age
# individuals
imm_mat = np.zeros((3, totpers))
pop11vec = np.array([pop_2010_EpS[0], pop_2011_EpS[0],
pop_2012_EpS[0]])
pop21vec = np.array([pop_2011_EpS[0], pop_2012_EpS[0],
pop_2013_EpS[0]])
fert_rates = get_fert(totpers, min_yr, max_yr, False)
mort_rates, infmort_rate = get_mort(totpers, min_yr, max_yr, False)
newbornvec = np.dot(fert_rates, np.vstack((pop_2010_EpS,
pop_2011_EpS,
pop_2012_EpS)).T)
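    # Implied law of motion for the youngest age:
    #     pop_{t+1}[0] = (1 - infmort_rate) * births_t + imm_rate[0] * pop_t[0]
    # and for every other age s >= 1:
    #     pop_{t+1}[s] = (1 - mort_rate[s-1]) * pop_t[s-1] + imm_rate[s] * pop_t[s]
    # Solving each equation for the immigration rate gives the residuals below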
imm_mat[:, 0] = ((pop21vec - (1 - infmort_rate) * newbornvec) /
pop11vec)
# Estimate 3 years of immigration rates for all other-aged
# individuals
pop11mat = np.vstack((pop_2010_EpS[:-1], pop_2011_EpS[:-1],
pop_2012_EpS[:-1]))
pop12mat = np.vstack((pop_2010_EpS[1:], pop_2011_EpS[1:],
pop_2012_EpS[1:]))
pop22mat = np.vstack((pop_2011_EpS[1:], pop_2012_EpS[1:],
pop_2013_EpS[1:]))
mort_mat = np.tile(mort_rates[:-1], (3, 1))
imm_mat[:, 1:] = (pop22mat - (1 - mort_mat) * pop11mat) / pop12mat
# Final estimated immigration rates are the averages over 3 years
imm_rates = imm_mat.mean(axis=0)
return imm_rates
def immsolve(imm_rates, *args):
'''
This function generates a vector of errors representing the
difference in two consecutive periods stationary population
distributions. This vector of differences is the zero-function
objective used to solve for the immigration rates vector, similar to
the original immigration rates vector from get_imm_resid(), that
sets the steady-state population distribution by age equal to the
population distribution in period int(1.5*S)
Args:
        imm_rates (Numpy array): immigration rates that correspond to
each period of life, length E+S
args (tuple): (fert_rates, mort_rates, infmort_rate, omega_cur,
g_n_SS)
Returns:
omega_errs (Numpy array): difference between omega_new and
omega_cur_pct, length E+S
'''
fert_rates, mort_rates, infmort_rate, omega_cur_lev, g_n_SS = args
omega_cur_pct = omega_cur_lev / omega_cur_lev.sum()
totpers = len(fert_rates)
OMEGA = np.zeros((totpers, totpers))
OMEGA[0, :] = ((1 - infmort_rate) * fert_rates +
np.hstack((imm_rates[0], np.zeros(totpers-1))))
OMEGA[1:, :-1] += np.diag(1 - mort_rates[:-1])
OMEGA[1:, 1:] += np.diag(imm_rates[1:])
omega_new = np.dot(OMEGA, omega_cur_pct) / (1 + g_n_SS)
omega_errs = omega_new - omega_cur_pct
return omega_errs
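# Illustrative sketch (added for exposition; the rates below are made up
# toy values): the steady-state population growth rate and age
# distribution are the dominant eigenvalue and eigenvector of the
# transition matrix OMEGA built exactly as in immsolve() above and
# get_pop_objs() below.
def _toy_omega_example():
    fert = np.array([0.0, 0.8, 0.3])    # toy fertility rates
    mort = np.array([0.1, 0.2, 1.0])    # toy mortality rates (last is 1)
    imm = np.array([0.02, 0.01, 0.0])   # toy immigration rates
    infmort = 0.005
    totpers = 3
    OMEGA = np.zeros((totpers, totpers))
    OMEGA[0, :] = ((1 - infmort) * fert +
                   np.hstack((imm[0], np.zeros(totpers - 1))))
    OMEGA[1:, :-1] += np.diag(1 - mort[:-1])
    OMEGA[1:, 1:] += np.diag(imm[1:])
    eigvals, eigvecs = np.linalg.eig(OMEGA)
    real_mask = np.isreal(eigvals)
    # Steady-state growth rate is the largest real eigenvalue minus 1
    g_n_SS = eigvals[real_mask].real.max() - 1
    # Pick the eigenvector column that matches that eigenvalue
    dom_col = np.where(real_mask)[0][eigvals[real_mask].real.argmax()]
    omega_SS = eigvecs[:, dom_col].real
    omega_SS = omega_SS / omega_SS.sum()
    return g_n_SS, omega_SS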
def get_pop_objs(E, S, T, min_yr, max_yr, curr_year, GraphDiag=False):
'''
This function produces the demographics objects to be used in the
OG-USA model package.
Args:
E (int): number of model periods in which agent is not
economically active, >= 1
S (int): number of model periods in which agent is economically
active, >= 3
T (int): number of periods to be simulated in TPI, > 2*S
min_yr (int): age in years at which agents are born, >= 0
max_yr (int): age in years at which agents die with certainty,
>= 4
curr_year (int): current year for which analysis will begin,
>= 2016
GraphDiag (bool): =True if want graphical output and printed
diagnostics
Returns:
        omega_path_S (Numpy array): time path of the population
distribution from the current state to the steady-state,
size T+S x S
g_n_SS (scalar): steady-state population growth rate
omega_SS (Numpy array): normalized steady-state population
distribution, length S
surv_rates (Numpy array): survival rates that correspond to
            each model period of life, length S
mort_rates (Numpy array): mortality rates that correspond to
each model period of life, length S
g_n_path (Numpy array): population growth rates over the time
path, length T + S
'''
# age_per = np.linspace(min_yr, max_yr, E+S)
fert_rates = get_fert(E + S, min_yr, max_yr, graph=False)
mort_rates, infmort_rate = get_mort(E + S, min_yr, max_yr,
graph=False)
mort_rates_S = mort_rates[-S:]
imm_rates_orig = get_imm_resid(E + S, min_yr, max_yr)
OMEGA_orig = np.zeros((E + S, E + S))
OMEGA_orig[0, :] = ((1 - infmort_rate) * fert_rates +
np.hstack((imm_rates_orig[0], np.zeros(E+S-1))))
OMEGA_orig[1:, :-1] += np.diag(1 - mort_rates[:-1])
OMEGA_orig[1:, 1:] += np.diag(imm_rates_orig[1:])
# Solve for steady-state population growth rate and steady-state
# population distribution by age using eigenvalue and eigenvector
# decomposition
eigvalues, eigvectors = np.linalg.eig(OMEGA_orig)
g_n_SS = (eigvalues[np.isreal(eigvalues)].real).max() - 1
eigvec_raw =\
eigvectors[:,
(eigvalues[np.isreal(eigvalues)].real).argmax()].real
omega_SS_orig = eigvec_raw / eigvec_raw.sum()
# Generate time path of the nonstationary population distribution
omega_path_lev = np.zeros((E + S, T + S))
pop_file = os.path.join(
CUR_PATH, 'data', 'demographic', 'pop_data.csv')
pop_data = pd.read_csv(pop_file, thousands=',')
pop_data_samp = pop_data[(pop_data['Age'] >= min_yr - 1) &
(pop_data['Age'] <= max_yr - 1)]
pop_2013 = np.array(pop_data_samp['2013'], dtype='f')
# Generate the current population distribution given that E+S might
# be less than max_yr-min_yr+1
age_per_EpS = np.arange(1, E + S + 1)
pop_2013_EpS = pop_rebin(pop_2013, E + S)
pop_2013_pct = pop_2013_EpS / pop_2013_EpS.sum()
# Age most recent population data to the current year of analysis
pop_curr = pop_2013_EpS.copy()
data_year = 2013
pop_next = np.dot(OMEGA_orig, pop_curr)
g_n_curr = ((pop_next[-S:].sum() - pop_curr[-S:].sum()) /
pop_curr[-S:].sum()) # g_n in 2013
pop_past = pop_curr # assume 2012-2013 pop
# Age the data to the current year
for per in range(curr_year - data_year):
pop_next = np.dot(OMEGA_orig, pop_curr)
g_n_curr = ((pop_next[-S:].sum() - pop_curr[-S:].sum()) /
pop_curr[-S:].sum())
pop_past = pop_curr
pop_curr = pop_next
# Generate time path of the population distribution
omega_path_lev[:, 0] = pop_curr.copy()
for per in range(1, T + S):
pop_next = np.dot(OMEGA_orig, pop_curr)
omega_path_lev[:, per] = pop_next.copy()
pop_curr = pop_next.copy()
# Force the population distribution after 1.5*S periods to be the
# steady-state distribution by adjusting immigration rates, holding
# constant mortality, fertility, and SS growth rates
imm_tol = 1e-14
fixper = int(1.5 * S)
omega_SSfx = (omega_path_lev[:, fixper] /
omega_path_lev[:, fixper].sum())
imm_objs = (fert_rates, mort_rates, infmort_rate,
omega_path_lev[:, fixper], g_n_SS)
imm_fulloutput = opt.fsolve(immsolve, imm_rates_orig,
args=(imm_objs), full_output=True,
xtol=imm_tol)
imm_rates_adj = imm_fulloutput[0]
imm_diagdict = imm_fulloutput[1]
omega_path_S = (omega_path_lev[-S:, :] /
np.tile(omega_path_lev[-S:, :].sum(axis=0), (S, 1)))
omega_path_S[:, fixper:] = \
np.tile(omega_path_S[:, fixper].reshape((S, 1)),
(1, T + S - fixper))
g_n_path = np.zeros(T + S)
g_n_path[0] = g_n_curr.copy()
g_n_path[1:] = ((omega_path_lev[-S:, 1:].sum(axis=0) -
omega_path_lev[-S:, :-1].sum(axis=0)) /
omega_path_lev[-S:, :-1].sum(axis=0))
g_n_path[fixper + 1:] = g_n_SS
omega_S_preTP = (pop_past.copy()[-S:]) / (pop_past.copy()[-S:].sum())
imm_rates_mat = np.hstack((
np.tile(np.reshape(imm_rates_orig[E:], (S, 1)), (1, fixper)),
np.tile(np.reshape(imm_rates_adj[E:], (S, 1)), (1, T + S - fixper))))
if GraphDiag:
# Check whether original SS population distribution is close to
# the period-T population distribution
omegaSSmaxdif = np.absolute(omega_SS_orig -
(omega_path_lev[:, T] /
omega_path_lev[:, T].sum())).max()
if omegaSSmaxdif > 0.0003:
print('POP. WARNING: Max. abs. dist. between original SS ' +
"pop. dist'n and period-T pop. dist'n is greater than" +
' 0.0003. It is ' + str(omegaSSmaxdif) + '.')
else:
print('POP. SUCCESS: orig. SS pop. dist is very close to ' +
"period-T pop. dist'n. The maximum absolute " +
'difference is ' + str(omegaSSmaxdif) + '.')
# Plot the adjusted steady-state population distribution versus
# the original population distribution. The difference should be
# small
omegaSSvTmaxdiff = np.absolute(omega_SS_orig - omega_SSfx).max()
if omegaSSvTmaxdiff > 0.0003:
            print('POP. WARNING: The maximum absolute difference ' +
                  'between any two corresponding points in the original'
                  + ' and adjusted steady-state population ' +
                  'distributions is ' + str(omegaSSvTmaxdiff) + ', ' +
'which is greater than 0.0003.')
else:
print('POP. SUCCESS: The maximum absolute difference ' +
'between any two corresponding points in the original'
+ ' and adjusted steady-state population ' +
'distributions is ' + str(omegaSSvTmaxdiff))
# Print whether or not the adjusted immigration rates solved the
# zero condition
immtol_solved = \
np.absolute(imm_diagdict['fvec'].max()) < imm_tol
if immtol_solved:
print('POP. SUCCESS: Adjusted immigration rates solved ' +
'with maximum absolute error of ' +
str(np.absolute(imm_diagdict['fvec'].max())) +
', which is less than the tolerance of ' +
str(imm_tol))
else:
print('POP. WARNING: Adjusted immigration rates did not ' +
'solve. Maximum absolute error of ' +
str(np.absolute(imm_diagdict['fvec'].max())) +
' is greater than the tolerance of ' + str(imm_tol))
# Test whether the steady-state growth rates implied by the
# adjusted OMEGA matrix equals the steady-state growth rate of
# the original OMEGA matrix
OMEGA2 = np.zeros((E + S, E + S))
OMEGA2[0, :] = ((1 - infmort_rate) * fert_rates +
np.hstack((imm_rates_adj[0], np.zeros(E+S-1))))
OMEGA2[1:, :-1] += np.diag(1 - mort_rates[:-1])
OMEGA2[1:, 1:] += np.diag(imm_rates_adj[1:])
eigvalues2, eigvectors2 = np.linalg.eig(OMEGA2)
        g_n_SS_adj = (eigvalues2[np.isreal(eigvalues2)].real).max() - 1
if np.max(np.absolute(g_n_SS_adj - g_n_SS)) > 10 ** (-8):
print('FAILURE: The steady-state population growth rate' +
' from adjusted OMEGA is different (diff is ' +
str(g_n_SS_adj - g_n_SS) + ') than the steady-' +
'state population growth rate from the original' +
' OMEGA.')
elif np.max(np.absolute(g_n_SS_adj - g_n_SS)) <= 10 ** (-8):
print('SUCCESS: The steady-state population growth rate' +
' from adjusted OMEGA is close to (diff is ' +
str(g_n_SS_adj - g_n_SS) + ') the steady-' +
'state population growth rate from the original' +
' OMEGA.')
# Do another test of the adjusted immigration rates. Create the
# new OMEGA matrix implied by the new immigration rates. Plug in
        # the adjusted steady-state population distribution. Hit it with
# the new OMEGA transition matrix and it should return the new
# steady-state population distribution
omega_new = np.dot(OMEGA2, omega_SSfx)
omega_errs = np.absolute(omega_new - omega_SSfx)
print('The maximum absolute difference between the adjusted ' +
'steady-state population distribution and the ' +
'distribution generated by hitting the adjusted OMEGA ' +
'transition matrix is ' + str(omega_errs.max()))
# Plot the original immigration rates versus the adjusted
# immigration rates
immratesmaxdiff = \
np.absolute(imm_rates_orig - imm_rates_adj).max()
print('The maximum absolute distance between any two points ' +
'of the original immigration rates and adjusted ' +
'immigration rates is ' + str(immratesmaxdiff))
# plots
pp.plot_omega_fixed(age_per_EpS, omega_SS_orig, omega_SSfx, E,
S, output_dir=OUTPUT_DIR)
pp.plot_imm_fixed(age_per_EpS, imm_rates_orig, imm_rates_adj, E,
S, output_dir=OUTPUT_DIR)
pp.plot_population_path(age_per_EpS, pop_2013_pct,
omega_path_lev, omega_SSfx, curr_year,
E, S, output_dir=OUTPUT_DIR)
    # Return omega_path_S, g_n_SS, omega_SSfx (normalized to S periods),
    # survival rates, mort_rates_S, g_n_path, imm_rates_mat, and
    # omega_S_preTP
return (omega_path_S.T, g_n_SS, omega_SSfx[-S:] /
omega_SSfx[-S:].sum(), 1-mort_rates_S, mort_rates_S,
g_n_path, imm_rates_mat.T, omega_S_preTP)
| mit |
ajaybhat/scikit-image | doc/examples/transform/plot_swirl.py | 7 | 2712 | """
=====
Swirl
=====
Image swirling is a non-linear image deformation that creates a whirlpool
effect. This example describes the implementation of this transform in
``skimage``, as well as the underlying warp mechanism.
Image warping
-------------
When applying a geometric transformation on an image, we typically make use of
a reverse mapping, i.e., for each pixel in the output image, we compute its
corresponding position in the input. The reason is that, if we were to do it
the other way around (map each input pixel to its new output position), some
pixels in the output may be left empty. On the other hand, each output
coordinate has exactly one corresponding location in (or outside) the input
image, and even if that position is non-integer, we may use interpolation to
compute the corresponding image value.
Performing a reverse mapping
----------------------------
To perform a geometric warp in ``skimage``, you simply need to provide the
reverse mapping to the ``skimage.transform.warp`` function. E.g., consider the
case where we would like to shift an image 50 pixels to the left. The reverse
mapping for such a shift would be::

    def shift_left(xy):
        xy[:, 0] += 50
        return xy

The corresponding call to warp is::

    from skimage.transform import warp
    warp(image, shift_left)

The swirl transformation
------------------------
Consider the coordinate :math:`(x, y)` in the output image. The reverse
mapping for the swirl transformation first computes, relative to a center
:math:`(x_0, y_0)`, its polar coordinates,
.. math::

    \\theta = \\arctan(y/x)

    \\rho = \sqrt{(x - x_0)^2 + (y - y_0)^2},

and then transforms them according to

.. math::

    r = \ln(2) \, \mathtt{radius} / 5

    \phi = \mathtt{rotation}

    s = \mathtt{strength}

    \\theta' = \phi + s \, e^{-\\rho / r + \\theta}
where ``strength`` is a parameter for the amount of swirl, ``radius`` indicates
the swirl extent in pixels, and ``rotation`` adds a rotation angle. The
transformation of ``radius`` into :math:`r` is to ensure that the
transformation decays to :math:`\\approx 1/1000^{\mathsf{th}}` within the
specified radius.
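As a concrete check (a worked example added here, using the value from the
code below): with ``radius=120`` we get :math:`r = \ln(2) \, 120 / 5 \\approx 16.6`,
so at :math:`\\rho = 120` the swirl angle has decayed by a factor of
:math:`e^{-120/16.6} \\approx 7 \\times 10^{-4}`, i.e. roughly the advertised 1/1000.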
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.transform import swirl
image = data.checkerboard()
swirled = swirl(image, rotation=0, strength=10, radius=120)
fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(8, 3),
sharex=True, sharey=True,
subplot_kw={'adjustable':'box-forced'})
ax0.imshow(image, cmap=plt.cm.gray, interpolation='none')
ax0.axis('off')
ax1.imshow(swirled, cmap=plt.cm.gray, interpolation='none')
ax1.axis('off')
plt.show()
| bsd-3-clause |
poryfly/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and enables us to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
    # Stop the user warnings output - it is not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
pompiduskus/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
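In symbols, writing y_hat_LS(x) for the prediction of a model trained on one
such random set LS, the quantities estimated pointwise in the code below are
bias^2(x) = (f(x) - E_LS[y_hat_LS(x)])^2, variance(x) = Var_LS[y_hat_LS(x)]
and noise(x) = Var[y | x]; together they sum to the expected mean squared
error E_LS[(y - y_hat_LS(x))^2].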
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
walterreade/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
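As a quick illustration of this scale invariance (a made-up two-vector check,
not part of the original data): ``sklearn.metrics.pairwise_distances([[1., 2., 3.]],
[[2., 4., 6.]], metric='cosine')`` returns a distance of 0 up to floating point
error, because the two vectors are proportional.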
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
turbomanage/training-data-analyst | CPB100/lab2b/scheduled/transform.py | 4 | 3058 | #!/usr/bin/env python3
# Copyright 2016 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# See https://github.com/GoogleCloudPlatform/datalab-samples/blob/master/basemap/earthquakes.ipynb for a notebook that illustrates this code
import csv
import requests
import io
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Classes to hold the data
class EarthQuake:
def __init__(self, row):
# Parse earthquake data from USGS
self.timestamp = row[0]
self.lat = float(row[1])
self.lon = float(row[2])
try:
self.magnitude = float(row[4])
except ValueError:
self.magnitude = 0
def get_earthquake_data(url):
# Read CSV earthquake data from USGS
response = requests.get(url)
csvio = io.StringIO(response.text)
reader = csv.reader(csvio)
header = next(reader)
quakes = [EarthQuake(row) for row in reader]
quakes = [q for q in quakes if q.magnitude > 0]
return quakes
# control marker color and size based on magnitude
def get_marker(magnitude):
    markersize = magnitude * 2.5
if magnitude < 1.0:
return ('bo'), markersize
if magnitude < 3.0:
return ('go'), markersize
elif magnitude < 5.0:
return ('yo'), markersize
else:
return ('ro'), markersize
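# For example (added for illustration): get_marker(4.2) returns ('yo', 10.5),
# a yellow circle of size 4.2 * 2.5, while get_marker(5.6) returns
# ('ro', 14.0), a red circle.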
def create_png(url, outfile):
    quakes = get_earthquake_data(url)
print(quakes[0].__dict__)
# Set up Basemap
mpl.rcParams['figure.figsize'] = '16, 12'
m = Basemap(projection='kav7', lon_0=-90, resolution = 'l', area_thresh = 1000.0)
m.drawcoastlines()
m.drawcountries()
m.drawmapboundary(fill_color='0.3')
m.drawparallels(np.arange(-90.,99.,30.))
junk = m.drawmeridians(np.arange(-180.,180.,60.))
# sort earthquakes by magnitude so that weaker earthquakes
# are plotted after (i.e. on top of) stronger ones
# the stronger quakes have bigger circles, so we'll see both
start_day = quakes[-1].timestamp[:10]
end_day = quakes[0].timestamp[:10]
quakes.sort(key=lambda q: q.magnitude, reverse=True)
# add earthquake info to the plot
for q in quakes:
x,y = m(q.lon, q.lat)
mcolor, msize = get_marker(q.magnitude)
m.plot(x, y, mcolor, markersize=msize)
# add a title
plt.title("Earthquakes {0} to {1}".format(start_day, end_day))
plt.savefig(outfile)
if __name__ == '__main__':
url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv'
outfile = 'earthquakes.png'
create_png(url, outfile)
| apache-2.0 |
rrohan/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 78 | 34552 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
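# Worked example (added for illustration): with X = [[0.], [1.], [3.]],
# l1_cross_distances returns D = [[1.], [3.], [2.]] and
# ij = [[0, 1], [0, 2], [1, 2]], i.e. the componentwise L1 distances for
# the index pairs (0, 1), (0, 2) and (1, 2).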
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
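                # collapse the per-target MSEs into a single value per point
                # via the root-mean-square across targets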
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(max(1, int(np.ceil(float(n_eval) / batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(max(1, int(np.ceil(float(n_eval) / batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
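        # Maximizing the Gaussian likelihood over beta and sigma2 reduces the
        # problem to minimizing sigma2_hat * det(R)^(1/n_samples); the negative
        # of that product is returned below so that larger values are better.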
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
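            # fmin_cobyla treats each constraint function as g(x) >= 0, so the
            # two functions appended above keep log10(theta) inside the box
            # [log10(thetaL), log10(thetaU)]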
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Back up the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
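

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It only
# exercises the fit/predict cycle documented above; it assumes the class
# defined in this file is named GaussianProcess, and the data and
# hyper-parameter values below are arbitrary choices made for demonstration.
if __name__ == '__main__':
    import numpy as np

    X_train = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y_train = (X_train * np.sin(X_train)).ravel()

    # supplying thetaL/thetaU triggers maximum likelihood estimation of theta
    gp = GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                         random_start=5)
    gp.fit(X_train, y_train)

    X_test = np.atleast_2d(np.linspace(0., 10., 50)).T
    y_pred, mse = gp.predict(X_test, eval_MSE=True)  # BLUP and its MSE
    sigma = np.sqrt(mse)  # pointwise predictive standard deviation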
| bsd-3-clause |
RRCKI/panda-jedi | pandajedi/jedibrokerage/AtlasProdTaskBroker.py | 1 | 40285 | import re
import sys
import random
import traceback
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from pandajedi.jedicore import Interaction
from TaskBrokerBase import TaskBrokerBase
from pandajedi.jedicore.ThreadUtils import ListWithLock,ThreadPool,WorkerThread,MapWithLock
import AtlasBrokerUtils
from AtlasProdJobBroker import AtlasProdJobBroker
from pandaserver.userinterface import Client as PandaClient
from pandaserver.dataservice import DataServiceUtils
# cannot use pandaserver.taskbuffer while Client is used
from taskbuffer.JobSpec import JobSpec
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# brokerage for ATLAS production
class AtlasProdTaskBroker (TaskBrokerBase):
# constructor
def __init__(self,taskBufferIF,ddmIF):
TaskBrokerBase.__init__(self,taskBufferIF,ddmIF)
# main to check
def doCheck(self,taskSpecList):
# make logger
tmpLog = MsgWrapper(logger)
tmpLog.debug('start doCheck')
# return for failure
retFatal = self.SC_FATAL,{}
retTmpError = self.SC_FAILED,{}
# get list of jediTaskIDs
taskIdList = []
taskSpecMap = {}
for taskSpec in taskSpecList:
taskIdList.append(taskSpec.jediTaskID)
taskSpecMap[taskSpec.jediTaskID] = taskSpec
# check with panda
tmpLog.debug('check with panda')
tmpPandaStatus,cloudsInPanda = PandaClient.seeCloudTask(taskIdList)
if tmpPandaStatus != 0:
tmpLog.error('failed to see clouds')
return retTmpError
# make return map
retMap = {}
for tmpTaskID,tmpCoreName in cloudsInPanda.iteritems():
tmpLog.debug('jediTaskID={0} -> {1}'.format(tmpTaskID,tmpCoreName))
if not tmpCoreName in ['NULL','',None]:
taskSpec = taskSpecMap[tmpTaskID]
if taskSpec.useWorldCloud():
# get destinations for WORLD cloud
ddmIF = self.ddmIF.getInterface(taskSpec.vo)
# get site
siteSpec = self.siteMapper.getSite(tmpCoreName)
# get nucleus
nucleus = siteSpec.pandasite
# get output/log datasets
tmpStat,tmpDatasetSpecs = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(tmpTaskID,['output','log'])
# get destinations
retMap[tmpTaskID] = {'datasets':[],'nucleus':nucleus}
for datasetSpec in tmpDatasetSpecs:
# skip distributed datasets
if DataServiceUtils.getDistributedDestination(datasetSpec.storageToken) != None:
continue
# get token
token = ddmIF.convertTokenToEndpoint(siteSpec.ddm,datasetSpec.storageToken)
# use default endpoint
if token == None:
token = siteSpec.ddm
                        # add original token
if not datasetSpec.storageToken in ['',None]:
token += '/{0}'.format(datasetSpec.storageToken)
retMap[tmpTaskID]['datasets'].append({'datasetID':datasetSpec.datasetID,
'token':'dst:{0}'.format(token),
'destination':tmpCoreName})
else:
retMap[tmpTaskID] = tmpCoreName
tmpLog.debug('ret {0}'.format(str(retMap)))
# return
tmpLog.debug('done')
return self.SC_SUCCEEDED,retMap
# main to assign
def doBrokerage(self,inputList,vo,prodSourceLabel,workQueue):
# list with a lock
inputListWorld = ListWithLock([])
# variables for submission
maxBunchTask = 100
# make logger
tmpLog = MsgWrapper(logger)
tmpLog.debug('start doBrokerage')
# return for failure
retFatal = self.SC_FATAL
retTmpError = self.SC_FAILED
tmpLog.debug('vo={0} label={1} queue={2} nTasks={3}'.format(vo,prodSourceLabel,
workQueue.queue_name,
len(inputList)))
# loop over all tasks
allRwMap = {}
prioMap = {}
tt2Map = {}
expRWs = {}
jobSpecList = []
for tmpJediTaskID,tmpInputList in inputList:
for taskSpec,cloudName,inputChunk in tmpInputList:
# collect tasks for WORLD
if taskSpec.useWorldCloud():
inputListWorld.append((taskSpec,inputChunk))
continue
# make JobSpec to be submitted for TaskAssigner
jobSpec = JobSpec()
jobSpec.taskID = taskSpec.jediTaskID
jobSpec.jediTaskID = taskSpec.jediTaskID
# set managed to trigger TA
jobSpec.prodSourceLabel = 'managed'
jobSpec.processingType = taskSpec.processingType
jobSpec.workingGroup = taskSpec.workingGroup
jobSpec.metadata = taskSpec.processingType
jobSpec.assignedPriority = taskSpec.taskPriority
jobSpec.currentPriority = taskSpec.currentPriority
jobSpec.maxDiskCount = (taskSpec.getOutDiskSize() + taskSpec.getWorkDiskSize()) / 1024 / 1024
if taskSpec.useWorldCloud():
# use destinationSE to trigger task brokerage in WORLD cloud
jobSpec.destinationSE = taskSpec.cloud
prodDBlock = None
setProdDBlock = False
for datasetSpec in inputChunk.getDatasets():
prodDBlock = datasetSpec.datasetName
if datasetSpec.isMaster():
jobSpec.prodDBlock = datasetSpec.datasetName
setProdDBlock = True
for fileSpec in datasetSpec.Files:
tmpInFileSpec = fileSpec.convertToJobFileSpec(datasetSpec)
jobSpec.addFile(tmpInFileSpec)
# use secondary dataset name as prodDBlock
if setProdDBlock == False and prodDBlock != None:
jobSpec.prodDBlock = prodDBlock
# append
jobSpecList.append(jobSpec)
prioMap[jobSpec.taskID] = jobSpec.currentPriority
tt2Map[jobSpec.taskID] = jobSpec.processingType
# get RW for a priority
if not allRwMap.has_key(jobSpec.currentPriority):
tmpRW = self.taskBufferIF.calculateRWwithPrio_JEDI(vo,prodSourceLabel,workQueue,
jobSpec.currentPriority)
if tmpRW == None:
tmpLog.error('failed to calculate RW with prio={0}'.format(jobSpec.currentPriority))
return retTmpError
allRwMap[jobSpec.currentPriority] = tmpRW
# get expected RW
expRW = self.taskBufferIF.calculateTaskRW_JEDI(jobSpec.jediTaskID)
if expRW == None:
tmpLog.error('failed to calculate RW for jediTaskID={0}'.format(jobSpec.jediTaskID))
return retTmpError
expRWs[jobSpec.taskID] = expRW
# for old clouds
if jobSpecList != []:
# get fullRWs
fullRWs = self.taskBufferIF.calculateRWwithPrio_JEDI(vo,prodSourceLabel,None,None)
if fullRWs == None:
tmpLog.error('failed to calculate full RW')
return retTmpError
# set metadata
for jobSpec in jobSpecList:
rwValues = allRwMap[jobSpec.currentPriority]
jobSpec.metadata = "%s;%s;%s;%s;%s;%s" % (jobSpec.metadata,
str(rwValues),str(expRWs),
str(prioMap),str(fullRWs),
str(tt2Map))
tmpLog.debug('run task assigner for {0} tasks'.format(len(jobSpecList)))
nBunchTask = 0
while nBunchTask < len(jobSpecList):
# get a bunch
jobsBunch = jobSpecList[nBunchTask:nBunchTask+maxBunchTask]
strIDs = 'jediTaskID='
for tmpJobSpec in jobsBunch:
strIDs += '{0},'.format(tmpJobSpec.taskID)
strIDs = strIDs[:-1]
tmpLog.debug(strIDs)
# increment index
nBunchTask += maxBunchTask
            # run task brokerage
stS,outSs = PandaClient.runTaskAssignment(jobsBunch)
tmpLog.debug('{0}:{1}'.format(stS,str(outSs)))
# for WORLD
if len(inputListWorld) > 0:
# thread pool
threadPool = ThreadPool()
# get full RW for WORLD
fullRWs = self.taskBufferIF.calculateWorldRWwithPrio_JEDI(vo,prodSourceLabel,None,None)
if fullRWs == None:
tmpLog.error('failed to calculate full WORLD RW')
return retTmpError
# get RW per priority
for taskSpec,inputChunk in inputListWorld:
if not taskSpec.currentPriority in allRwMap:
tmpRW = self.taskBufferIF.calculateWorldRWwithPrio_JEDI(vo,prodSourceLabel,workQueue,
taskSpec.currentPriority)
if tmpRW == None:
tmpLog.error('failed to calculate RW with prio={0}'.format(taskSpec.currentPriority))
return retTmpError
allRwMap[taskSpec.currentPriority] = tmpRW
# live counter for RWs
liveCounter = MapWithLock(allRwMap)
# make workers
ddmIF = self.ddmIF.getInterface(vo)
for iWorker in range(4):
thr = AtlasProdTaskBrokerThread(inputListWorld,threadPool,
self.taskBufferIF,ddmIF,
fullRWs,liveCounter,
workQueue)
thr.start()
threadPool.join(60*10)
# return
tmpLog.debug('doBrokerage done')
return self.SC_SUCCEEDED
# check file availability
def findMissingFiles(self,jediTaskID,cloudName):
tmpLog = MsgWrapper(logger,'<jediTaskID={0}>'.format(jediTaskID))
tmpLog.debug('start findMissingFiles')
# return for failure
retError = self.SC_FAILED
# get datasets
tmpSt,datasetSpecList = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(jediTaskID,['input'],True)
if not tmpSt:
tmpLog.error('failed to get the list of datasets')
return retError
# loop over all datasets
for datasetSpec in datasetSpecList:
# check only master dataset
if not datasetSpec.isMaster():
continue
tmpLog.debug('checking {0}'.format(datasetSpec.datasetName))
# get ddmIF
ddmIF = self.ddmIF.getInterface(datasetSpec.vo)
if ddmIF == None:
tmpLog.error('failed to get DDM I/F for vo={0}'.format(datasetSpec.vo))
return retError
# get the list of sites where data is available
tmpSt,tmpRet = AtlasBrokerUtils.getSitesWithData(self.siteMapper,ddmIF,
datasetSpec.datasetName)
if tmpSt != self.SC_SUCCEEDED:
tmpLog.error('failed to get the list of sites where {0} is available, since {1}'.format(datasetSpec.datasetName,
tmpRet))
return retError
dataSiteMap = tmpRet
# data is unavailable in cloud
if not dataSiteMap.has_key(cloudName):
tmpLog.error('{0} is unavailable in cloud={1} map={2}'.format(datasetSpec.datasetName,cloudName,str(dataSiteMap)))
return retError
# mapping between sites and storage endpoints
checkedSites = [self.siteMapper.getCloud(cloudName)['source']]+dataSiteMap[cloudName]['t2']
siteStorageEP = AtlasBrokerUtils.getSiteStorageEndpointMap(checkedSites,self.siteMapper)
# get available files per site/endpoint
tmpAvFileMap = ddmIF.getAvailableFiles(datasetSpec,
siteStorageEP,
self.siteMapper,
ngGroup=[1],
checkLFC=True)
if tmpAvFileMap == None:
tmpLog.error('failed to get available file list for {0}'.format(datasetSpec.datasetName))
return retError
# check availability
missingFiles = []
for fileSpec in datasetSpec.Files:
fileFound = False
for tmpSiteName,availableFilesMap in tmpAvFileMap.iteritems():
for tmpStorageType,availableFiles in availableFilesMap.iteritems():
for availableFile in availableFiles:
if fileSpec.lfn == availableFile.lfn:
fileFound = True
break
if fileFound:
break
if fileFound:
break
# missing
if not fileFound:
missingFiles.append(fileSpec.fileID)
tmpLog.debug('{0} missing'.format(fileSpec.lfn))
# update contents
if missingFiles != []:
tmpSt = self.taskBufferIF.setMissingFiles_JEDI(jediTaskID,datasetSpec.datasetID,missingFiles)
if not tmpSt:
tmpLog.error('failed to set missing files in {0}'.format(datasetSpec.datasetName))
return retError
tmpLog.debug('done findMissingFiles')
return self.SC_SUCCEEDED
# thread for real worker
class AtlasProdTaskBrokerThread (WorkerThread):
# constructor
def __init__(self,inputList,threadPool,taskbufferIF,ddmIF,
fullRW,prioRW,workQueue):
        # initialize worker with no semaphore
WorkerThread.__init__(self,None,threadPool,logger)
        # attributes
self.inputList = inputList
self.taskBufferIF = taskbufferIF
self.ddmIF = ddmIF
self.msgType = 'taskbrokerage'
self.fullRW = fullRW
self.prioRW = prioRW
self.numTasks = 0
self.workQueue = workQueue
# wrapper for return
def sendLogMessage(self,tmpLog):
# send info to logger
tmpLog.bulkSendMsg('taskbrokerage',loggerName='bamboo')
tmpLog.debug('sent')
# main function
def runImpl(self):
# cutoff for disk in TB
diskThreshold = self.taskBufferIF.getConfigValue(self.msgType, 'DISK_THRESHOLD_{0}'.format(self.workQueue.queue_name),
'jedi', 'atlas')
if diskThreshold is None:
diskThreshold = 100 * 1024
# dataset type to ignore file availability check
datasetTypeToSkipCheck = ['log']
# thresholds for data availability check
thrInputSize = self.taskBufferIF.getConfigValue(self.msgType, 'INPUT_SIZE_THRESHOLD', 'jedi', 'atlas')
if thrInputSize is None:
thrInputSize = 1
thrInputSize *= 1024*1024*1024
thrInputNum = self.taskBufferIF.getConfigValue(self.msgType, 'INPUT_NUM_THRESHOLD', 'jedi', 'atlas')
if thrInputNum is None:
thrInputNum = 100
thrInputSizeFrac = self.taskBufferIF.getConfigValue(self.msgType, 'INPUT_SIZE_FRACTION', 'jedi', 'atlas')
if thrInputSizeFrac is None:
thrInputSizeFrac = 10
thrInputSizeFrac = float(thrInputSizeFrac) / 100
thrInputNumFrac = self.taskBufferIF.getConfigValue(self.msgType, 'INPUT_NUM_FRACTION', 'jedi', 'atlas')
if thrInputNumFrac is None:
thrInputNumFrac = 10
thrInputNumFrac = float(thrInputNumFrac) / 100
cutOffRW = 50
negWeightTape = 0.001
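        # strong multiplicative penalty applied in the weight calculation below
        # when part of a nucleus' input data is available only on tape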
# main
lastJediTaskID = None
siteMapper = self.taskBufferIF.getSiteMapper()
while True:
try:
taskInputList = self.inputList.get(1)
# no more datasets
if len(taskInputList) == 0:
self.logger.debug('{0} terminating after processing {1} tasks since no more inputs '.format(self.__class__.__name__,
self.numTasks))
return
# loop over all tasks
for taskSpec,inputChunk in taskInputList:
lastJediTaskID = taskSpec.jediTaskID
# make logger
tmpLog = MsgWrapper(self.logger,'<jediTaskID={0}>'.format(taskSpec.jediTaskID),monToken='jediTaskID={0}'.format(taskSpec.jediTaskID))
tmpLog.debug('start')
                    tmpLog.info('thrInputSize:{0} thrInputNum:{1} thrInputSizeFrac:{2} thrInputNumFrac:{3}'.format(thrInputSize,
thrInputNum,
thrInputSizeFrac,
thrInputNumFrac))
# RW
taskRW = self.taskBufferIF.calculateTaskWorldRW_JEDI(taskSpec.jediTaskID)
# get nuclei
nucleusList = siteMapper.nuclei
if taskSpec.nucleus in nucleusList:
candidateNucleus = taskSpec.nucleus
else:
tmpLog.info('got {0} candidates'.format(len(nucleusList)))
######################################
# check status
newNucleusList = {}
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
if not tmpNucleusSpec.state in ['ACTIVE']:
tmpLog.info(' skip nucleus={0} due to status={1} criteria=-status'.format(tmpNucleus,
tmpNucleusSpec.state))
else:
newNucleusList[tmpNucleus] = tmpNucleusSpec
nucleusList = newNucleusList
tmpLog.info('{0} candidates passed status check'.format(len(nucleusList)))
if nucleusList == {}:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
######################################
# check status of transfer backlog
t1Weight = taskSpec.getT1Weight()
if t1Weight < 0:
tmpLog.info('skip transfer backlog check due to negative T1Weight')
else:
newNucleusList = {}
backlogged_nuclei = self.taskBufferIF.getBackloggedNuclei()
for tmpNucleus, tmpNucleusSpec in nucleusList.iteritems():
if tmpNucleus in backlogged_nuclei:
tmpLog.info(' skip nucleus={0} due to long transfer backlog criteria=-transfer_backlog'.
format(tmpNucleus))
else:
newNucleusList[tmpNucleus] = tmpNucleusSpec
nucleusList = newNucleusList
tmpLog.info('{0} candidates passed transfer backlog check'.format(len(nucleusList)))
if nucleusList == {}:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
######################################
# check endpoint
fractionFreeSpace = {}
newNucleusList = {}
tmpStat,tmpDatasetSpecList = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(taskSpec.jediTaskID,
['output','log'])
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
toSkip = False
for tmpDatasetSpec in tmpDatasetSpecList:
# ignore distributed datasets
if DataServiceUtils.getDistributedDestination(tmpDatasetSpec.storageToken) != None:
continue
# get endpoint with the pattern
tmpEP = tmpNucleusSpec.getAssoicatedEndpoint(tmpDatasetSpec.storageToken)
if tmpEP == None:
tmpLog.info(' skip nucleus={0} since no endpoint with {1} criteria=-match'.format(tmpNucleus,
tmpDatasetSpec.storageToken))
toSkip = True
break
# check state
"""
if not tmpEP['state'] in ['ACTIVE']:
tmpLog.info(' skip nucleus={0} since endpoint {1} is in {2} criteria=-epstatus'.format(tmpNucleus,
tmpEP['ddm_endpoint_name'],
tmpEP['state']))
toSkip = True
break
"""
# check space
tmpSpaceSize = tmpEP['space_free'] + tmpEP['space_expired']
tmpSpaceToUse = 0
if tmpNucleus in self.fullRW:
# 0.25GB per cpuTime/corePower/day
tmpSpaceToUse = long(self.fullRW[tmpNucleus]/10/24/3600*0.25)
if tmpSpaceSize-tmpSpaceToUse < diskThreshold:
tmpLog.info(' skip nucleus={0} since disk shortage (free {1} - reserved {2} < thr {3}) at endpoint {4} criteria=-space'.format(tmpNucleus,
tmpSpaceSize,
tmpSpaceToUse,
diskThreshold,
tmpEP['ddm_endpoint_name']))
toSkip = True
break
# keep fraction of free space
if not tmpNucleus in fractionFreeSpace:
fractionFreeSpace[tmpNucleus] = {'total':0,'free':0}
try:
tmpOld = float(fractionFreeSpace[tmpNucleus]['free']) / \
float(fractionFreeSpace[tmpNucleus]['total'])
except:
tmpOld = None
try:
tmpNew = float(tmpSpaceSize-tmpSpaceToUse)/float(tmpEP['space_total'])
except:
tmpNew = None
if tmpNew != None and (tmpOld == None or tmpNew < tmpOld):
fractionFreeSpace[tmpNucleus] = {'total':tmpEP['space_total'],
'free':tmpSpaceSize-tmpSpaceToUse}
if not toSkip:
newNucleusList[tmpNucleus] = tmpNucleusSpec
nucleusList = newNucleusList
tmpLog.info('{0} candidates passed endpoint check {1} TB'.format(len(nucleusList),diskThreshold/1024))
if nucleusList == {}:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
######################################
# ability to execute jobs
newNucleusList = {}
# get all panda sites
tmpSiteList = []
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
tmpSiteList += tmpNucleusSpec.allPandaSites
tmpSiteList = list(set(tmpSiteList))
tmpLog.debug('===== start for job check')
jobBroker = AtlasProdJobBroker(self.ddmIF,self.taskBufferIF)
tmpSt,tmpRet = jobBroker.doBrokerage(taskSpec,taskSpec.cloud,inputChunk,None,True,
tmpSiteList,tmpLog)
tmpLog.debug('===== done for job check')
if tmpSt != Interaction.SC_SUCCEEDED:
tmpLog.error('no sites can run jobs')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
okNuclei = set()
for tmpSite in tmpRet:
siteSpec = siteMapper.getSite(tmpSite)
okNuclei.add(siteSpec.pandasite)
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
if tmpNucleus in okNuclei:
newNucleusList[tmpNucleus] = tmpNucleusSpec
else:
tmpLog.info(' skip nucleus={0} due to missing ability to run jobs criteria=-job'.format(tmpNucleus))
nucleusList = newNucleusList
tmpLog.info('{0} candidates passed job check'.format(len(nucleusList)))
if nucleusList == {}:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
######################################
# data locality
toSkip = False
availableData = {}
for datasetSpec in inputChunk.getDatasets():
# only for real datasets
if datasetSpec.isPseudo():
continue
# ignore DBR
if DataServiceUtils.isDBR(datasetSpec.datasetName):
continue
# skip locality check
if DataServiceUtils.getDatasetType(datasetSpec.datasetName) in datasetTypeToSkipCheck:
continue
# use deep scan for primary dataset
if datasetSpec.isMaster():
deepScan = True
else:
deepScan = False
# get nuclei where data is available
tmpSt,tmpRet = AtlasBrokerUtils.getNucleiWithData(siteMapper,self.ddmIF,
datasetSpec.datasetName,
nucleusList.keys(),
deepScan)
if tmpSt != Interaction.SC_SUCCEEDED:
tmpLog.error('failed to get nuclei where data is available, since {0}'.format(tmpRet))
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
toSkip = True
break
# sum
for tmpNucleus,tmpVals in tmpRet.iteritems():
if not tmpNucleus in availableData:
availableData[tmpNucleus] = tmpVals
else:
availableData[tmpNucleus] = dict((k,v+tmpVals[k]) for (k,v) in availableData[tmpNucleus].iteritems())
if toSkip:
continue
if availableData != {}:
newNucleusList = {}
# skip if no data
skipMsgList = []
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
if len(nucleusList) == 1:
tmpLog.info(' disable data locality check for nucleus={0} since no other candidate'.format(tmpNucleus))
newNucleusList[tmpNucleus] = tmpNucleusSpec
elif availableData[tmpNucleus]['tot_size'] > thrInputSize and \
availableData[tmpNucleus]['ava_size_any'] < availableData[tmpNucleus]['tot_size'] * thrInputSizeFrac:
tmpMsg = ' skip nucleus={0} due to insufficient input size {1}B < {2}*{3} criteria=-insize'.format(tmpNucleus,
availableData[tmpNucleus]['ava_size_any'],
availableData[tmpNucleus]['tot_size'],
thrInputSizeFrac)
skipMsgList.append(tmpMsg)
elif availableData[tmpNucleus]['tot_num'] > thrInputNum and \
availableData[tmpNucleus]['ava_num_any'] < availableData[tmpNucleus]['tot_num'] * thrInputNumFrac:
tmpMsg = ' skip nucleus={0} due to short number of input files {1} < {2}*{3} criteria=-innum'.format(tmpNucleus,
availableData[tmpNucleus]['ava_num_any'],
availableData[tmpNucleus]['tot_num'],
thrInputNumFrac)
skipMsgList.append(tmpMsg)
else:
newNucleusList[tmpNucleus] = tmpNucleusSpec
if len(newNucleusList) > 0:
nucleusList = newNucleusList
for tmpMsg in skipMsgList:
tmpLog.info(tmpMsg)
else:
tmpLog.info(' disable data locality check since no nucleus has input data')
tmpLog.info('{0} candidates passed data check'.format(len(nucleusList)))
if nucleusList == {}:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
######################################
# weight
self.prioRW.acquire()
nucleusRW = self.prioRW[taskSpec.currentPriority]
self.prioRW.release()
totalWeight = 0
nucleusweights = []
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
if not tmpNucleus in nucleusRW:
nucleusRW[tmpNucleus] = 0
wStr = '1'
# with RW
if tmpNucleus in nucleusRW and nucleusRW[tmpNucleus] >= cutOffRW:
weight = 1 / float(nucleusRW[tmpNucleus])
wStr += '/( RW={0} )'.format(nucleusRW[tmpNucleus])
else:
weight = 1
wStr += '/(1 : RW={0}<{1})'.format(nucleusRW[tmpNucleus],cutOffRW)
# with data
if availableData != {}:
if availableData[tmpNucleus]['tot_size'] > 0:
weight *= float(availableData[tmpNucleus]['ava_size_any'])
weight /= float(availableData[tmpNucleus]['tot_size'])
wStr += '* ( available_input_size_DISKTAPE={0} )'.format(availableData[tmpNucleus]['ava_size_any'])
wStr += '/ ( total_input_size={0} )'.format(availableData[tmpNucleus]['tot_size'])
# negative weight for tape
if availableData[tmpNucleus]['ava_size_any'] > availableData[tmpNucleus]['ava_size_disk']:
weight *= negWeightTape
wStr += '*( weight_TAPE={0} )'.format(negWeightTape)
# fraction of free space
if tmpNucleus in fractionFreeSpace:
try:
tmpFrac = float(fractionFreeSpace[tmpNucleus]['free']) / \
float(fractionFreeSpace[tmpNucleus]['total'])
weight *= tmpFrac
wStr += '*( free_space={0} )/( total_space={1} )'.format(fractionFreeSpace[tmpNucleus]['free'],
fractionFreeSpace[tmpNucleus]['total'])
except:
pass
tmpLog.info(' use nucleus={0} weight={1} {2} criteria=+use'.format(tmpNucleus,weight,wStr))
totalWeight += weight
nucleusweights.append((tmpNucleus,weight))
tmpLog.info('final {0} candidates'.format(len(nucleusList)))
######################################
# final selection
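                    # roulette-wheel (weight-proportional) selection: draw a
                    # uniform value in [0, totalWeight] and walk the candidates
                    # until the cumulative weight exceeds it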
tgtWeight = random.uniform(0,totalWeight)
candidateNucleus = None
for tmpNucleus,weight in nucleusweights:
tgtWeight -= weight
if tgtWeight <= 0:
candidateNucleus = tmpNucleus
break
if candidateNucleus == None:
candidateNucleus = nucleusweights[-1][0]
######################################
# update
nucleusSpec = nucleusList[candidateNucleus]
# get output/log datasets
tmpStat,tmpDatasetSpecs = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(taskSpec.jediTaskID,
['output','log'])
# get destinations
retMap = {taskSpec.jediTaskID: AtlasBrokerUtils.getDictToSetNucleus(nucleusSpec,tmpDatasetSpecs)}
tmpRet = self.taskBufferIF.setCloudToTasks_JEDI(retMap)
tmpLog.info(' set nucleus={0} with {1} criteria=+set'.format(candidateNucleus,tmpRet))
self.sendLogMessage(tmpLog)
if tmpRet:
tmpMsg = 'set task.status=ready'
tmpLog.info(tmpMsg)
tmpLog.sendMsg(tmpMsg,self.msgType)
# update RW table
self.prioRW.acquire()
for prio,rwMap in self.prioRW.iteritems():
if prio > taskSpec.currentPriority:
continue
if candidateNucleus in rwMap:
rwMap[candidateNucleus] += taskRW
else:
rwMap[candidateNucleus] = taskRW
self.prioRW.release()
except:
errtype,errvalue = sys.exc_info()[:2]
errMsg = '{0}.runImpl() failed with {1} {2} '.format(self.__class__.__name__,errtype.__name__,errvalue)
errMsg += 'lastJediTaskID={0} '.format(lastJediTaskID)
errMsg += traceback.format_exc()
logger.error(errMsg)
| apache-2.0 |
jorge2703/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (which needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
etkirsch/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
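# X is the dictionary with n_features rows and n_components columns (atoms);
# w is the sparse code with exactly n_nonzero_coefs nonzero entries, so the
# clean measurement is y = Xw.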
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
igabr/Metis_Projects_Chicago_2017 | 05-project-kojack/filters.py | 1 | 7976 | import pandas as pd
import numpy as np
import string
from nltk.corpus import stopwords
nltk_stopwords = stopwords.words("english")+["rt", "via","-»","--»","--","---","-->","<--","->","<-","«--","«","«-","»","«»", " →", "→"]
punc = '!"%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
def filtration_1(dataframe, column_to_clean, new_col):
cleaned_tweets = []
for label in dataframe.index:
tweet = dataframe.loc[label, :][column_to_clean]
tweet = tweet.lower()
clean = [x for x in tweet.split() if x not in string.punctuation]
clean = [x for x in clean if x not in nltk_stopwords]
clean = [x for x in clean if "@" not in x]
clean = [x for x in clean if "฿" not in x]
clean = [x for x in clean if x[0] not in string.digits]
clean = [x for x in clean if x[0] not in punc]
clean = [x for x in clean if len(x) != 1]
clean = " ".join(clean)
clean = clean.strip()
cleaned_tweets.append(clean)
dataframe[new_col] = cleaned_tweets
return dataframe
def filtration_2(dataframe, column):
# clean = list(map(lambda x: x.replace("#", ""), clean)) #we want to maintain hashtags!
dataframe[column] = dataframe[column].apply(lambda x: x.replace('"', ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("♬♫♩♪", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("…", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(".",""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("⋆", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ⋆ ", " "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("#rt", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("#re", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" alime ", " all time "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" alltime ", " all time "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" →", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("alime", "all time "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("atm", "at the moment"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ath ", " all time high "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("str8", "straight"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" v ", " very "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" #d", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ddos ", " distributed denial of service "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("btce", "btc"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("bitcoina", "bitcoin"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("rbitcoin", "bitcoin"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" – ", " "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("->", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ➤ ", " "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("◄►", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("◄", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ur ", " your "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" u ", " you "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("forthen", "for then"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(">", "greater than"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("<", "less than"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("lt", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("gt", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(":", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("&", "and"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ampamp", "and"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("amp", "and"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" amp ", " and "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" bu ", " but "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("/", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("...", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("(", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(")", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("“", '"'))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("”", '"'))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("‘", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("’", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("-"," "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("*", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("!", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("⬛️", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\u200d", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001f986", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001f942", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001f92f", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001f911", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001F193", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ⭕ ", " "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("🤔", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("☞ ", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("[", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("]", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("{", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("}", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ô", "o"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ó", "o"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("é", "e"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ï","i"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("®", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("á", "a"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ã", "a"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ç", "c"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" jan ", " january "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" feb ", " february "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" mar ", " march "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" apr ", " april "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" jun ", " june "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" jul ", " july "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" aug ", " august "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" sept ", " september "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" oct ", " october "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" nov ", " november "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" dec ", " december "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("washinon", "washington"))
return dataframe
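

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the two passes
# are meant to be chained. The file name and column names below are
# assumptions made purely for demonstration.
# df = pd.read_csv("tweets.csv")
# df = filtration_1(df, column_to_clean="text", new_col="clean_text")
# df = filtration_2(df, "clean_text")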
| mit |
JDReutt/BayesDB | bayesdb/select_utils.py | 2 | 6044 | #
# Copyright (c) 2010-2014, MIT Probabilistic Computing Project
#
# Lead Developers: Jay Baxter and Dan Lovell
# Authors: Jay Baxter, Dan Lovell, Baxter Eaves, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import utils
import numpy
import os
import pylab
import matplotlib.cm
import inspect
import operator
import ast
import string
import utils
import functions
import data_utils as du
from pyparsing import *
import bayesdb.bql_grammar as bql_grammar
def evaluate_where_on_row(row_idx, row, where_conditions, M_c, M_c_full, X_L_list, X_D_list, T, T_full, engine, tablename, numsamples, impute_confidence):
"""
Helper function that applies WHERE conditions to row, returning False if row doesn't satisfy where
clause, and the list of function results if it does satisfy the where clause.
"""
function_values = []
for (func, f_args, op, val) in where_conditions:
if func == functions._column and f_args[1] != None and numpy.isnan(T[row_idx][f_args[0]]):
col_idx = f_args[0]
confidence = f_args[1]
## need to do predictive sampling to evaluate where condition with confidence
## TODO: easier way to do this would be to call impute on backend, but would need to change
## crosscat so that impute_and_confidence could also return the original samples, or evaluate
## a whereclause.
Y = [(row_idx, cidx, row[cidx]) for cidx in M_c['name_to_idx'].values() \
if not numpy.isnan(T[row_idx][cidx])]
samples = engine.call_backend('simple_predictive_sample',
dict(M_c=M_c, X_L=X_L_list, X_D=X_D_list, Y=Y, Q=[[row_idx,col_idx]], n=numsamples))
samples_satisfying_where = 0
for sample in samples:
value = du.convert_code_to_value(M_c, col_idx, sample[0])
if op(value, val):
samples_satisfying_where += 1
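            # accept the row only if the Monte Carlo estimate of
            # P(condition holds | observed values in this row) reaches the
            # requested confidence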
if float(samples_satisfying_where)/len(samples) >= confidence:
# Where clause is satisfied! Now, generate impute summary.
imputed_code, imputation_confidence = utils.get_imputation_and_confidence_from_samples(
M_c, X_L_list[0], col_idx, samples)
if imputed_code is not None:
imputed_value = du.convert_code_to_value(M_c, col_idx, imputed_code)
else:
imputed_value = T[row_idx][col_idx]
function_values.append(imputed_value)
else:
return False
else:
if func != functions._column_ignore:
where_value = func(f_args, row_idx, row, M_c, X_L_list, X_D_list, T, engine, numsamples)
else:
where_value = func(f_args, row_idx, row, M_c_full, T_full, engine)
if func == functions._row_id:
# val should be a row list name in this case. look up the row list, and set val to be the list of
# row indices in the row list. Throws BayesDBRowListDoesNotExistError if row list does not exist.
val = engine.persistence_layer.get_row_list(tablename, val)
if op(val, where_value): # for operator.contains, op(a,b) means 'b in a': so need to switch args.
function_values.append(where_value)
else:
return False
else:
# Normal, most common condition.
if op(where_value, val):
function_values.append(where_value)
else:
return False
return function_values
def convert_row_from_codes_to_values(row, M_c):
"""
Helper function to convert a row from its 'code' (as it's stored in T) to its 'value'
(the human-understandable value).
"""
ret = []
for cidx, code in enumerate(row):
if not du.flexible_isnan(code):
ret.append(du.convert_code_to_value(M_c, cidx, code))
else:
ret.append(code)
return tuple(ret)
def check_if_functions_need_models(queries, tablename, order_by, where_conditions):
"""
If there are no models, make sure that we aren't using functions that require models.
TODO: make this less hardcoded
"""
blacklisted_functions = [functions._similarity, functions._row_typicality, functions._col_typicality, functions._probability]
used_functions = [q[0] for q in queries] + [w[0] for w in where_conditions] + [x[0] for x in order_by]
for bf in blacklisted_functions:
        if bf in used_functions:
raise utils.BayesDBNoModelsError(tablename)
def compute_result_and_limit(rows, limit, queries, M_c, X_L_list, X_D_list, T, engine, numsamples):
data = []
row_count = 0
# Compute aggregate functions just once, then cache them.
aggregate_cache = dict()
for query_idx, (query_function, query_args, aggregate) in enumerate(queries):
if aggregate:
aggregate_cache[query_idx] = query_function(query_args, None, None, M_c, X_L_list, X_D_list, T, engine, numsamples)
    # Only return one row if every query is an aggregate function (row_id is never an aggregate, so it is excluded from the count and not returned).
assert queries[0][0] == functions._row_id
if len(aggregate_cache) == len(queries) - 1:
limit = 1
# Iterate through data table, calling each query_function to fill in the output values.
for row_id, row_values in rows:
ret_row = []
for query_idx, (query_function, query_args, aggregate) in enumerate(queries):
if aggregate:
ret_row.append(aggregate_cache[query_idx])
else:
ret_row.append(query_function(query_args, row_id, row_values, M_c, X_L_list, X_D_list, T, engine, numsamples))
data.append(tuple(ret_row))
row_count += 1
if row_count >= limit:
break
return data
| apache-2.0 |
elijah513/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/ensemble/forest.py | 14 | 67896 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
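# A minimal usage sketch of the two index helpers above (added for
# illustration; defined but never called by the library code). For a fixed
# random_state the out-of-bag indices are exactly the sample ids that never
# occur in the bootstrap draw, so the two functions partition
# range(n_samples) between "in-bag" and "out-of-bag".
def _oob_indices_demo(random_state=0, n_samples=10):
    in_bag = _generate_sample_indices(random_state, n_samples)
    oob = _generate_unsampled_indices(random_state, n_samples)
    assert set(oob) == set(range(n_samples)) - set(in_bag)
    return in_bag, oob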
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
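# Hedged sketch of the weighting trick in _parallel_build_trees above
# (illustration only; never called). Rather than materialising the bootstrap
# sample, every original sample is kept once and weighted by the number of
# times it was drawn, so the total weight still equals n_samples.
def _bootstrap_weight_demo(random_state=0, n_samples=8):
    indices = _generate_sample_indices(random_state, n_samples)
    curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
    curr_sample_weight *= bincount(indices, minlength=n_samples)
    assert curr_sample_weight.sum() == n_samples
    return curr_sample_weight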
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non-zero elements
indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, unlike
# [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'subsample', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated in 0.17 and will be removed in 0.19. It was replaced by class_weight='balanced_subsample' using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
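# Hedged usage sketch for the classifier defined above (illustration only;
# the toy data below is made up and is not taken from this module or its
# tests). Defined as a helper that is never called, so importing the module
# is unaffected.
def _random_forest_classifier_demo():
    X_demo = [[0, 0], [0, 1], [0.2, 0.1], [1, 0], [1, 1], [0.9, 0.8]]
    y_demo = [0, 0, 0, 1, 1, 1]
    clf = RandomForestClassifier(n_estimators=25, oob_score=True,
                                 random_state=0)
    clf.fit(X_demo, y_demo)
    # predict() averages the per-tree class probabilities and takes the
    # argmax; oob_score_ is the out-of-bag estimate of generalization accuracy.
    return clf.predict([[0.9, 0.9]]), clf.oob_score_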
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of decision tree
regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
# ensure_2d=False because there are actually unit tests checking that
# we fail for 1d input.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
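# A minimal usage sketch (added note, not part of the original module),
# assuming only scikit-learn and NumPy; `X_demo` is a hypothetical toy input:
#
#     import numpy as np
#     X_demo = np.random.RandomState(0).rand(20, 4)
#     embedder = RandomTreesEmbedding(n_estimators=10, random_state=0)
#     X_sparse = embedder.fit_transform(X_demo)
#     # X_sparse is a sparse one-hot encoding of the leaf each sample reaches
#     # in every tree, so it has one column per leaf in the whole forest.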
| bsd-3-clause |
jgrisham4/nonlinear-gfem | src/plot_results.py | 1 | 1425 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
d_c = np.genfromtxt("coarse.dat")
d_m = np.genfromtxt("medium.dat")
d_f = np.genfromtxt("fine.dat")
d_e = np.genfromtxt("exact.dat")
plt.plot(d_e[1:,0], d_e[1:,1], lw=1.5, label="exact")
plt.plot(d_c[1:,0], d_c[1:,1], "o", label="coarse")
plt.plot(d_m[1:,0], d_m[1:,1], "s", label="medium", markevery=4)
plt.plot(d_f[1:,0], d_f[1:,1], "v", label="fine", markevery=6)
plt.xlabel("x [m]")
plt.ylabel("T [deg C]")
plt.legend(loc=2)
N = np.array([d_c[0,0], d_m[0,0], d_f[0,0]])
L2 = np.array([d_c[0,1], d_m[0,1], d_f[0,1]])
fig2 = plt.figure()
plt.plot(1.0/N, L2, "ok")
plt.xscale("log")
plt.yscale("log")
slope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(1.0/N), np.log10(L2))
print("Order of accuracy: {}".format(slope))
dfdm = np.genfromtxt("fdm.dat")
dcvm = np.genfromtxt("cvm.dat")
dsacvm = np.genfromtxt("sacvm.dat")
fig3 = plt.figure()
plt.plot(dfdm[:,0], dfdm[:,1], "-k" , lw=1.5)
plt.plot(dfdm[:,0], dfdm[:,2], "-b" , lw=1.5)
plt.plot(dfdm[:,0], dfdm[:,3], "-r" , lw=1.5)
plt.plot(dcvm[:,0], dcvm[:,1], "--r" , lw=1.5)
plt.plot(dcvm[:,0], dcvm[:,2], "--k" , lw=1.5)
plt.plot(dcvm[:,0], dcvm[:,3], "--b" , lw=1.5)
plt.plot(dsacvm[:,0], dsacvm[:,1], "or" , lw=1.5)
plt.plot(dsacvm[:,0], dsacvm[:,2], "ok" , lw=1.5)
plt.plot(dsacvm[:,0], dsacvm[:,3], "ob" , lw=1.5)
#plt.legend(loc=2)
plt.show()
| gpl-3.0 |
efiring/numpy-work | doc/example.py | 1 | 3497 | """This is the docstring for the example.py module. Module names should
be short and all-lowercase. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead, or import individual functions as needed, e.g.
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
describe : type
Explanation
output
Explanation
tuple
Explanation
items
even more explaining
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega}) = \sum_{n=-\infty}^{\infty} x(n) e^{-j\omega n}
And even use a greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
<BLANKLINE>
b
"""
pass
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/stats/_stats_mstats_common.py | 1 | 8448 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimated gradient.
See also
--------
:func:`scipy.optimize.curve_fit` : Use non-linear
least squares to fit a function to data.
:func:`scipy.optimize.leastsq` : Minimize the sum of
squares of a set of equations.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
('r-squared:', 0.080402268539028335)
Plot the data along with the fitted line
>>> plt.plot(x, y, 'o', label='original data')
>>> plt.plot(x, intercept + slope*x, 'r', label='fitted line')
>>> plt.legend()
>>> plt.show()
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY) * (1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope * xmean
sterrest = np.sqrt((1 - r ** 2) * ssym / ssxm / df)
return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1 / 18. * (ny * (ny - 1) * (2 * ny + 5) -
np.sum(k * (k - 1) * (2 * k + 5) for k in nxreps) -
np.sum(k * (k - 1) * (2 * k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z * sigma) / 2.)), len(slopes) - 1)
Rl = max(int(np.round((nt + z * sigma) / 2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
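# Added note: the returned bounds are simply the pairwise slopes at ranks Rl
# and Ru of the sorted slope list, i.e. the rank-based confidence interval
# from Sen (1968); no further distributional assumption is made here.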
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
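# Worked example (added note): for arr = [1., 2., 2., 3., 3., 3.] the sorted
# diff/change logic above yields unique values [2., 3.] with frequencies
# [2, 3], i.e. only values that appear at least twice are reported.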
| mit |
alexandrebarachant/decoding-brain-challenge-2016 | utils.py | 2 | 2039 | import numpy as np
from copy import deepcopy
from sklearn.base import BaseEstimator, TransformerMixin
def epoch_data(data, events, stim_ID, tmin=-.2, tmax=0.399):
"""Epoch data."""
ix_events = np.where(np.diff(np.int32(events > 0)) == 1)[0] + 1
ix_min = int(tmin*1000)
ix_max = int(tmax*1000)
nsamp = ix_max - ix_min
X = np.zeros((len(ix_events), data.shape[0], nsamp))
y = np.int32(events[ix_events] > 50)
st_id = np.int32(stim_ID[ix_events])
for i, ix in enumerate(ix_events):
sl = slice((ix + ix_min), (ix + ix_max))
tmp = data[:, sl]
X[i, :, 0:tmp.shape[1]] = tmp
return X, y, st_id
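# Added note: epoch_data detects event onsets as rising edges of the event
# channel (diff of the binarized events), then cuts a [tmin, tmax] window
# around each onset; seconds are converted to sample indices by multiplying
# by 1000, so the code assumes 1 kHz sampling. Labels y are 1 when the event
# code exceeds 50, and st_id keeps the stimulus ID at each onset. A hedged
# usage sketch with hypothetical shapes:
#
#     # data: (n_channels, n_times), events/stim_ID: (n_times,)
#     # X, y, st_id = epoch_data(data, events, stim_ID, tmin=-.2, tmax=.399)
#     # X.shape -> (n_events, n_channels, n_samples_per_epoch)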
class DownSampler(BaseEstimator, TransformerMixin):
"""Downsample transformer"""
def __init__(self, factor=4):
"""Init."""
self.factor = factor
def fit(self, X, y):
return self
def transform(self, X):
return X[:, :, ::self.factor]
class EpochsVectorizer(BaseEstimator, TransformerMixin):
"""Vectorize epochs."""
def __init__(self):
"""Init."""
def fit(self, X, y):
return self
def transform(self, X):
X2 = np.array([x.flatten() for x in X])
return X2
class CospBoostingClassifier(BaseEstimator, TransformerMixin):
"""Cospectral matrice bagging."""
def __init__(self, baseclf):
"""Init."""
self.baseclf = baseclf
def fit(self, X, y):
self.clfs_ = []
for i in range(X.shape[-1]):
clf = deepcopy(self.baseclf)
self.clfs_.append(clf.fit(X[:, :, :, i], y))
return self
def predict_proba(self, X):
proba = []
for i in range(X.shape[-1]):
proba.append(self.clfs_[i].predict_proba(X[:, :, :, i]))
proba = np.mean(proba, axis=0)
return proba
def transform(self, X):
proba = []
for i in range(X.shape[-1]):
proba.append(self.clfs_[i].predict_proba(X[:, :, :, i]))
proba = np.concatenate(proba, 1)
return proba
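# Added note: CospBoostingClassifier fits one copy of `baseclf` per slice of
# the last axis of X (one cospectral matrix per frequency); predict_proba
# averages the per-slice probabilities, while transform concatenates them as
# features.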
| bsd-3-clause |
kekeller/ultrasound_image | bicep_script.py | 1 | 8623 | import os
import numpy as np # linear algebra
import scipy
from skimage.io import imread
import matplotlib.pyplot as plt
from skimage import segmentation
from skimage.filters import gaussian, median
from skimage.measure import *
from sklearn.cluster import *
from skimage.segmentation import *
from skimage.exposure import *
import matplotlib.patches as patches
from skimage.transform import *
from matplotlib.path import Path
import peakutils
from peakutils.plot import plot as pplot
import string
import csv
# return cropped image
def crop_border(image):
x1,x2 = 120,571
y1,y2 = 60,531
image = image[y1:y2,x1:x2]
return resize(image,[451,441])
def crop_border_ref(image):
x1,x2 = 203,644
y1,y2 = 87,538
return image[y1:y2,x1:x2]
IMAGE_BICEP_TRAIN = './tendon_images/Bicep_Train'
IMAGE_SUP_TRAIN = './tendon_images/Sup_Train'
IMAGE_BICEP = './tendon_images/Biceps'
IMAGE_SUP = './tendon_images/Supraspinatus'
def load_images_names_from_folder(folder):
images = []
name = []
train = []
train_name = []
for filename in os.listdir(folder):
if "ID" in filename and 'REF' not in filename:
img = imread(os.path.join(folder,filename))
images.append(crop_border(img))
name.append(filename)
# if there are training images, load them
ref = filename.replace('.jpg','')+'_REF_SEL.jpg'
if ref in os.listdir(folder):
img = imread(os.path.join(folder,ref))
train.append(crop_border_ref(img))
train_name.append(ref)
return [images,train,name,train_name]
# extract x value to find where the ROI should start
# this looks at the very top of the image to find the imprint of two metal bars on the skin
# these are shown as peaks in the rows and we can localize an x coordinate for the region from these.
# Bicep tendon has bars on the left, sup on the right
# for the Bicep the ROI is then a set distance from the bars, for the sup it varies between patients
def set_x_ROI(image,offset):
image = gaussian(image,sigma=2)
y = []
for i in range(image.shape[1]):
y.insert(i,np.mean(image[0:10,i]))
x = np.linspace(0, image.shape[1], image.shape[1])
y = np.array(y)
indexes = np.array(peakutils.indexes(y, thres=0.8, min_dist=20))  # there should be two peaks, one per metal bar
x = ((indexes[1] - indexes[0]))
x = x + offset
return x
# extract initial ROI by sliding window down to find maximum intensity
# in thresholded image
def find_initial_ROI(image,init_x):
maximum = 0
max_x = init_x
max_y = 50
(x1,y1) = (init_x,50) # if there is a brighter region at top from skin then ignore
length = 193 # from training
width = 100
(x2,y2) = (x1+length,y1+width)
# shift down to find brightest region
for j in range (image.shape[1] - width):
x = x1
y = y1 + j
(x2,y2) = (x+length,y+width)
temp = np.sum(image[y:y2, x:x2])
if temp > maximum:
maximum = temp
max_x = x
max_y = y
return max_x,max_y
# fit contour on region based on window on maximum
def fit_contour(image,max_x,max_y,length,width):
s = np.linspace(0, 2*np.pi, 400)
x = (max_x + length/2) + length*np.cos(s)
y = (max_y + width/2) + (width/2)*np.sin(s)
init = np.array([x, y]).T
snake = active_contour(image,init, bc ='fixed', alpha=1.5, beta=1.0, w_line=1, w_edge=1, gamma=0.1)
return snake
# fix x coordinates of snake to be maximum length
def fix_snake(snake,x,length):
for val in snake:
if val[0] < x:
val[0] = x
if val[0] > (x+length):
val[0] = x+length
return snake
# based on the snake coordinates, we can extract the maximim sized
# rectangle for the region
def ROI_from_snake(snake):
s = snake
x = []
y = []
top = []
bottom = []
for i,j in s:
x.append(i)
y.append(j)
for i,j in s:
if (i >= (min(x) + 10)) and (i <= (max(x) - 10)):
if j > int(np.mean(y)):
bottom.append(int(j))
elif j < int(np.mean(y)):
top.append(int(j))
ymin = int(np.mean(y))
ymax = int(np.mean(y))
for i in range(100):
ymin = ymin - 1
if ymin in top:
break
for i in range(100):
ymax = ymax + 1
if ymax in bottom:
break
ROI = [[int(min(x)),ymin],[int(max(x)),ymax] ]
#ROI = [[int(min(x)),max(top)],[max(x),max(bottom)] ]#[[min(x),max(top)],[max(x),min(bottom)] ]
return ROI
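# Added note: ROI_from_snake collapses the snake contour to an axis-aligned
# box [[x_min, y_min], [x_max, y_max]]: the x-range comes from the contour
# extremes, and the y-range grows outward from the vertical mean of the
# contour until it meets the nearest top/bottom contour points.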
# 1. Transform images by equalizing histogram and filtering
# 2. Then threshold the image to get the maximum
# 3. After the inital ROI is extracted based on the brightness
# we fit a snake onto the thresholded image
# 4. Cut the snake so it fits within in the desired length
# set values
thres_val = 0.97
length = 193 #2cm
width = 100
offset = 145 # 145 is about 1.5cm
c = [] # converted images
snake = []
ROI_xy = []
ROI = []
rect = [] # used for plotting
ROI_img = [] # used for F1 extraction
Precision = []
Recall = []
F1 = []
IMAGE_BICEP_TRAIN = raw_input("Enter Bicep Image Folder: ")  # './tendon_images/Bicep_Train'
[images,train,names,train_name] = load_images_names_from_folder(IMAGE_BICEP_TRAIN)
print "Starting Analysis"
# filter and threshold
for i in range(len(images)):
temp = images[i]
temp = equalize_hist(temp)
temp = gaussian(temp,sigma=2)
x = np.max(temp,2)
x[x<thres_val]=0
c.insert(i,x)
print "Calculating the ROI"
# fit snake
for i in range(len(c)):
ROI_x = set_x_ROI(images[i],offset)
[x,y] = find_initial_ROI(c[i],ROI_x)
ROI_xy.insert(i,[x,y])
contour = fit_contour(c[i],ROI_xy[i][0],ROI_xy[i][1],length,width)
snake.insert(i,contour)
snake[i] = fix_snake(snake[i],ROI_xy[i][0],length)
region = ROI_from_snake(snake[i])
ROI.insert(i,region)
# Find F1, precision, and recall scores for the images
# this is done by converting the ROI region and the training image
# to a boolean matrix and checking the overlaps.
if train_name != []:
# build image
for idx in range(len(c)):
image = c[idx]
x1 = ROI[idx][0][0]
x2 = ROI[idx][1][0]
y1 = ROI[idx][0][1]
y2 = ROI[idx][1][1]
# build rectangle path around ROI
p = Path([[x1,y1],[x2,y1],[x2,y2],[x1,y2]])
#p = Path(np.array(snake[idx]))
for i in range(image.shape[0]):
for j in range(image.shape[1]):
if (p.contains_points([[j,i]])):
image[i,j] = 255
else:
image[i,j] = 0
ROI_img.insert(idx,image)
# find f1 score
for idx in range(len(c)):
t = np.max(train[idx],2).astype(bool).ravel() # convert to bool vector
img = ROI_img[idx].astype(bool).ravel()
tp = 0.0
fn = 0.0
tn = 0.0
fp = 0.0
for i in range(len(t)):
if ((t[i] == True) and (img[i] == True)):
tp = tp +1
if ((t[i] == True) and (img[i] == False)):
fn = fn +1
if ((t[i] == False) and (img[i] == False)):
tn = tn +1
if ((t[i] == False) and (img[i] == True)):
fp = fp +1
precision = tp/ (tp + fp)
recall = tp/(tp + fn)
Precision.insert(idx,precision)
Recall.insert(idx,recall)
f1 = 2 * (precision*recall) / (precision + recall)
F1.insert(idx,f1)
print "Saving data"
# Save the data to a csv file
if train_name != []:
label = ['name','ROIx1','ROIy1','ROIx2','ROIy2','tendon_width','F1','Prec','Recall']
data = []
for i in range(len(names)):
x = [names[i].replace('.jpg',''),int(ROI[i][0][0]),
int(ROI[i][0][1]),int(ROI[i][1][0]),int(ROI[i][1][1]),
int(ROI[i][1][1] - ROI[i][0][1]), round(F1[i],2), round(Precision[i],2), round(Recall[i],2) ]
data.insert(i,x)
else:
label = ['name','ROIx1','ROIy1','ROIx2','ROIy2','tendon_width']
data = []
for i in range(len(names)):
x = [names[i].replace('.jpg',''),int(ROI[i][0][0]),
int(ROI[i][0][1]),int(ROI[i][1][0]),int(ROI[i][1][1]),
int(ROI[i][1][1] - ROI[i][0][1])]
data.insert(i,x)
# write label and data
with open('Bicep_Output.csv', 'w') as csvfile3:
datawriter = csv.writer(csvfile3, delimiter=',')
datawriter.writerow(label)
for row in data:
datawriter.writerow(row)
print "All Done! \n" | gpl-3.0 |
mrcslws/htmresearch | projects/capybara/supervised_baseline/v1_no_sequences/plot_utils.py | 9 | 8500 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import itertools
import plotly.offline as py
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import numpy as np
def show_values(pc, fmt="%.2f", **kw):
"""
Heatmap with text in each cell with matplotlib's pyplot
Source: http://stackoverflow.com/a/25074150/395857
By HYRY
"""
from itertools import izip
pc.update_scalarmappable()
ax = pc.axes
for p, color, value in izip(pc.get_paths(), pc.get_facecolors(),
pc.get_array()):
x, y = p.vertices[:-2, :].mean(0)
if np.all(color[:3] > 0.5):
color = (0.0, 0.0, 0.0)
else:
color = (1.0, 1.0, 1.0)
ax.text(x, y, fmt % value, ha="center", va="center", color=color, **kw)
def cm2inch(*tupl):
"""
Specify figure size in centimeter in matplotlib
Source: http://stackoverflow.com/a/22787457/395857
By gns-ank
"""
inch = 2.54
if type(tupl[0]) == tuple:
return tuple(i / inch for i in tupl[0])
else:
return tuple(i / inch for i in tupl)
def heatmap(AUC, title, xlabel, ylabel, xticklabels, yticklabels,
figure_width=40, figure_height=20, correct_orientation=False,
cmap='RdBu'):
"""
Inspired by:
- http://stackoverflow.com/a/16124677/395857
- http://stackoverflow.com/a/25074150/395857
"""
# Plot it out
fig, ax = plt.subplots()
# c = ax.pcolor(AUC, edgecolors='k', linestyle= 'dashed',
# linewidths=0.2, cmap='RdBu', vmin=0.0, vmax=1.0)
c = ax.pcolor(AUC, edgecolors='k', linestyle='dashed', linewidths=0.2,
cmap=cmap)
# put the major ticks at the middle of each cell
ax.set_yticks(np.arange(AUC.shape[0]) + 0.5, minor=False)
ax.set_xticks(np.arange(AUC.shape[1]) + 0.5, minor=False)
# set tick labels
# ax.set_xticklabels(np.arange(1,AUC.shape[1]+1), minor=False)
ax.set_xticklabels(xticklabels, minor=False)
ax.set_yticklabels(yticklabels, minor=False)
# set title and x/y labels
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# Remove last blank column
plt.xlim((0, AUC.shape[1]))
# Turn off all the ticks
ax = plt.gca()
for t in ax.xaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
for t in ax.yaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
# Add color bar
plt.colorbar(c)
# Add text in each cell
show_values(c)
# Proper orientation (origin at the top left instead of bottom left)
if correct_orientation:
ax.invert_yaxis()
ax.xaxis.tick_top()
# resize
fig = plt.gcf()
# fig.set_size_inches(cm2inch(40, 20))
# fig.set_size_inches(cm2inch(40*4, 20*4))
fig.set_size_inches(cm2inch(figure_width, figure_height))
def plot_classification_report(classification_report, filename,
title='Classification report ', cmap='RdBu'):
"""
Plot scikit-learn classification report.
Extension based on http://stackoverflow.com/a/31689645/395857
"""
lines = classification_report.split('\n')
classes = []
plotMat = []
support = []
class_names = []
for line in lines[2: (len(lines) - 2)]:
t = line.strip().split()
if len(t) < 2: continue
classes.append(t[0])
v = [float(x) for x in t[1: len(t) - 1]]
support.append(int(t[-1]))
class_names.append(t[0])
plotMat.append(v)
xlabel = 'Metrics'
ylabel = 'Classes'
xticklabels = ['Precision', 'Recall', 'F1-score']
yticklabels = ['{0} ({1})'.format(class_names[idx], sup) for idx, sup in
enumerate(support)]
figure_width = 25
figure_height = len(class_names) + 7
correct_orientation = False
heatmap(np.array(plotMat), title, xlabel, ylabel, xticklabels, yticklabels,
figure_width, figure_height, correct_orientation, cmap=cmap)
plt.savefig(filename,
dpi=200,
format='png',
bbox_inches='tight')
plt.close()
def plot_train_history(epochs, acc, loss, output_file):
trace0 = go.Scatter(x=epochs, y=loss, name='Loss')
trace1 = go.Scatter(x=epochs, y=acc, name='Accuracy')
layout = go.Layout(showlegend=True, title='Training history')
fig = go.Figure(data=[trace0, trace1], layout=layout)
py.plot(fig,
filename=output_file,
auto_open=False,
link_text=False)
def plot_data(X, y_labels, t, title):
unique_labels = np.unique(y_labels)
print('unique labels (%s): %s' % (title, unique_labels))
colors = ['grey', 'blue', 'black', 'orange', 'yellow', 'pink']
# Plot input data
traces = []
for label in unique_labels:
trace = go.Scatter(x=t[np.where(y_labels == label)[0]],
y=X[np.where(y_labels == label)[0]][:, 0],
name='Data (class %s)' % label,
mode='markers',
marker={'color': colors[int(label)]})
traces.append(trace)
layout = go.Layout(showlegend=True, title='Data (%s)' % title)
fig = go.Figure(data=traces, layout=layout)
py.plot(fig,
filename='%s_data.html' % title,
auto_open=False,
link_text=False)
def plot_predictions(t, X_values, y_true, y_pred, output_file_path, title):
"""
Plot prediction results (correct and incorrect)
:param t: (list) timesteps
:param X_values: (list) input scalar values (before any encoding)
:param y_true: (list) true labels
:param y_pred: (list) predicted labels
:param output_file_path: (str) path to output file
:param title: (str) title of the plot.
"""
if type(t) != np.ndarray:
t = np.array(t)
if type(X_values) != np.ndarray:
X_values = np.array(X_values)
if type(y_true) != np.ndarray:
y_true = np.array(y_true)
if type(y_pred) != np.ndarray:
y_pred = np.array(y_pred)
correct = []
incorrect = []
for prediction in y_true == y_pred:
correct.append(prediction)
incorrect.append(not prediction)
trace0 = go.Scatter(x=t[correct],
y=X_values[correct],
name='Correct predictions',
mode='markers', marker={'color': 'green'})
trace1 = go.Scatter(x=t[incorrect],
y=X_values[incorrect],
name='Incorrect predictions',
mode='markers', marker={'color': 'red'})
layout = go.Layout(showlegend=True, title=title)
fig = go.Figure(data=[trace0, trace1], layout=layout)
py.plot(fig,
filename=output_file_path,
auto_open=False,
link_text=False)
def plot_confusion_matrix(cm,
filename,
classes,
normalize=True,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# Use only 2 decimal numbers
cm = np.around(cm, 2)
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=30)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(filename)
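# A hedged usage sketch (added note); assumes scikit-learn is available and
# `y_true`/`y_pred` are hypothetical label arrays:
#
#     from sklearn.metrics import confusion_matrix
#     cm = confusion_matrix(y_true, y_pred)
#     plot_confusion_matrix(cm, 'confusion_matrix.png',
#                           classes=['class 0', 'class 1'], normalize=True)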
| agpl-3.0 |
tuos/FlowAndCorrelations | healpy/isotropic/mapAllandSingleEvent/analysis_iso_smooth_single.py | 1 | 1349 |
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.special as spc
import math
import matplotlib as mpl
from scipy.special import lpmn
import scipy.integrate as integrate
from scipy.integrate import quad
from numpy import sin, cos
from matplotlib.cm import ScalarMappable
import random
nside = 64
npix = hp.nside2npix(nside)
SIZE = 400
DPI = 100
hpxmap2 = np.zeros(npix, dtype = np.float)
hpxmap1 = np.zeros(npix, dtype = np.float)
hpxmapNorm = np.zeros(npix, dtype = np.float)
events = 8000
mult = 2500
for i in range(events):
for k in range(mult):
ipix = random.randint(0, npix-1)
#hpxmap2[indices2[i]] += 1.0
hpxmap2[ipix] += npix*1.0/mult/events
if i == 0:
hpxmap1[ipix] += npix*1.0/mult
for i in range(npix):
if hpxmap2[i] != 0:
hpxmapNorm[i] = hpxmap1[i]*1.0/hpxmap2[i]
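# Added note: hpxmap2 holds the per-pixel average over all events (scaled so
# an isotropic map sits near 1 everywhere) and hpxmap1 holds the first event
# alone, so hpxmapNorm is the single-event map normalized by the all-event
# average and fluctuates around 1 for isotropic input.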
#hp_smoothed = hp.sphtfunc.smoothing(hpxmap2, fwhm=np.radians(1), iter = 1)
hp_smoothed = hp.smoothing(hpxmapNorm, fwhm=np.radians(5))
#hp.mollview(hp_smoothed, cmap = cm.jet, xsize = SIZE, min = 0.9, max = 1.1, title='Isotropic smoothed 1 event with normalization')
hp.mollview(hp_smoothed, cmap = cm.jet, xsize = SIZE, title='Isotropic smoothed 1 event with normalization')
hp.graticule()
plt.savefig("map_iso_smooth_single.png", dpi = DPI)
| mit |
saiwing-yeung/scikit-learn | sklearn/tests/test_calibration.py | 2 | 12016 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
ignore_warnings,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@ignore_warnings
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# a different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
lseman/pylspm | pylspm/test_heuristic.py | 1 | 2855 | import numpy as np
from numpy import inf
import pandas as pd
import scipy.stats
from .pylspm import PyLSpm
from .results import PyLSpmHTML
from .boot import PyLSboot
from itertools import combinations
from .mga import mga
from .permuta import permuta
def test_heuristic(nrboot, cores, data_, lvmodel, mvmodel, scheme, regression, h='0', maxit='100'):
test = 'pso'
if test == 'ga':
split = [1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1,
0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1]
if test == 'pso':
split = [0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0,
1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0]
split = pd.DataFrame(split)
split.columns = ['Split']
dataSplit = pd.concat([data_, split], axis=1)
nk = max(split['Split'])
splitado = []
f1 = []
for i in range(nk + 1):
data_ = (dataSplit.loc[dataSplit['Split']
== i]).drop('Split', axis=1)
data_.index = range(len(data_))
estima = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, 0, 100, HOC='true')
print(estima.path_matrix)
f1.append(estima.residuals()[3])
print(f1)
print(np.sum(f1))
print(1 / np.sum(f1))
compara = 1
if compara == 1:
allCombs = list(combinations(range(0, nk + 1), 2))
for i in range(len(allCombs)):
print("Groups " + str(allCombs[i][0]) + '-' + str(allCombs[i][1]))
print('MGA')
mga(50, 8, dataSplit, lvmodel,
mvmodel, scheme, regression, 0, 100, g1=allCombs[i][0], g2=allCombs[i][1],
segmento='Split')
print('Permutation')
permuta(nrboot, cores, data_, lvmodel,
mvmodel, scheme, regression, 0, 100, g1=allCombs[i][0], g2=allCombs[i][1])
| mit |
calebfoss/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 18 | 2444 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
nhejazi/scikit-learn | examples/linear_model/plot_ols_3d.py | 53 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
# #############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
kristoforcarlson/nest-simulator-fork | topology/examples/test_3d_gauss.py | 13 | 2641 | # -*- coding: utf-8 -*-
#
# test_3d_gauss.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5,0.5), random.uniform(-0.5,0.5), random.uniform(-0.5,0.5)]
for j in range(1000)]
l1 = topo.CreateLayer({'extent': [1.5, 1.5, 1.5], # must specify 3d extent AND center
'center': [0., 0., 0.],
'positions': pos,
'elements': 'iaf_neuron'})
# visualize
#xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
#xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(nest.GetChildren(l1)[0]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# Gaussian connections in full volume [-0.75,0.75]**3
topo.ConnectLayers(l1, l1,
{'connection_type': 'divergent', 'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.75,-0.75,-0.75], 'upper_right': [0.75,0.75,0.75]}},
'kernel':{'gaussian': {'p_center': 1., 'sigma': 0.25}}})
# show connections from center element
# sender shown in red, targets in green
ctr=topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr,l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr],[yctr],[zctr],s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt,ytgt,ztgt,s=40, facecolor='g', edgecolor='g')
tgts=topo.GetTargetNodes(ctr,l1)[0]
d=topo.Distance(ctr,tgts)
plt.figure()
plt.hist(d, 25)
#plt.show()
| gpl-2.0 |
sbirch/TUSK | tusk/analysis/distance.py | 1 | 4950 | import numpy as np
import scipy.spatial
from sklearn import manifold
import matplotlib.pyplot as plt
import tusk.atus as atus
import math
import functools32
from collections import Counter
random_cases = [x['caseid'] for x in atus.db.query('select respondents.caseid from respondents inner join summary on respondents.caseid=summary.caseid where minutes_working>120 order by random() limit 500')]
'''
random_cases = [20031009031805, 20110505111101, 20120302121318, 20030504033191,
20110302111360, 20040201041709, 20060302060511, 20080302081207, 20040112032778,
20100112091142, 20030705032646, 20100605100732, 20051110050839, 20081110081173,
20031211031999, 20090111080659, 20110101111533, 20040605041647, 20101111102089,
20050403051716, 20030112022622, 20100301101675, 20110504111952, 20050404050948,
20101211101391, 20110403111160, 20080302081151, 20100908101628, 20080201080730,
20080605080167, 20030504032942, 20110403110501, 20041211041182, 20070302071571,
20091009091727, 20060605061393, 20050706051141, 20051110051311, 20091110091985,
20100301101588, 20080605080547, 20031110030649, 20040504041194, 20110808112195,
20040201040766, 20050706051175, 20110212100922, 20100402101639, 20121009121668,
20090909091448, 20080201080836, 20040110033112, 20030807032050, 20120403121652,
20090908091367, 20051211052239, 20041009041726, 20110112101558, 20110604111221,
20110101112303, 20060302060501, 20110402111572, 20110504111043, 20060504061709,
20070504071540, 20100302100879, 20050707051673, 20060807060533, 20070504071415,
20061211061382, 20030403031703, 20080807082185, 20090706091095, 20070404072432,
20100201101183, 20030403031975, 20090303090536, 20090111081932, 20040907041797,
20060909061509, 20090302092134, 20120605122173, 20040704042004, 20120403122380,
20030212020681, 20030604033336, 20050605051503, 20070404071681, 20030908033334,
20040112031370, 20030403032090, 20050807051406, 20090504092493, 20060101061528,
20030302031598, 20060402062179, 20030908031287, 20070101070597, 20040908041705,
20120504122342]
'''
@functools32.lru_cache(maxsize=None)
def get_case_data(c):
r = atus.db.query('''select
activity_code,
activity_number,
start_minute,
stop_minute
from activities where caseid=%d order by start_minute''' % c)
return list(r)
@functools32.lru_cache(maxsize=None)
def get_case_data_1(c):
entries = get_case_data(c)
activity_vector = {}
for entry in entries:
for m in xrange(entry['start_minute'], entry['stop_minute']+1):
activity_vector[m] = entry['activity_code']
result = tuple([activity_vector.get(m,None) for m in xrange(0, 1440+1, 10)])
return result
@functools32.lru_cache(maxsize=None)
def get_case_data_2(c):
entries = get_case_data(c)
tier1_vector = Counter()
for entry in entries:
tier1_vector[int(entry['activity_code'][:2])] += entry['stop_minute'] - entry['start_minute']
# Note that this ignores class 18 (travel) and class 50 (data codes)
result = np.array([tier1_vector.get(tier1, 0) for tier1 in xrange(0, 18)])
return result
def actdiff(x,y):
if x is None or y is None:
return 1
x1,x2,x3 = x[:2],x[2:4],x[4:]
y1,y2,y3 = y[:2],y[2:4],y[4:]
if x1 == '18':
x1 = x2
if y1 == '18':
y1 = y2
score = 1
if x1 == y1:
score -= 0.5
if x1 == y1 and x2 == y2:
score -= 0.3
if x == y:
score -= 0.2
return score
def case_distance_1(c1, c2):
c1, c2 = int(c1), int(c2)
d1 = get_case_data_1(c1)
d2 = get_case_data_1(c2)
return sum([actdiff(x,y) for x,y in zip(d1,d2)])**3
def case_distance_2(c1, c2):
c1, c2 = int(c1), int(c2)
d1 = get_case_data_2(c1)
d2 = get_case_data_2(c2)
return np.sum( (d1-d2)**2 )
def color_day(case):
weekday = atus.db.get('''select weekday from respondents where caseid=%d''' % case)
if weekday.lower() in ['saturday', 'sunday']:
return plt.cm.winter(0.0)
return plt.cm.winter(1.0)
def color_work(case):
mins = atus.db.get('''select minutes_working from summary where caseid=%d''' % case)
if mins < 120:
return plt.cm.winter(0.0)
return plt.cm.winter(1.0)
def look(case):
marker = 'o'
weekday = atus.db.get('''select weekday from respondents where caseid=%d''' % case)
if weekday.lower() in ['saturday', 'sunday']:
marker = 'v'
#mins = atus.db.get('''select minutes_working from summary where caseid=%d''' % case)
income_code = atus.db.get('''select family_income_code from cps where caseid=%d and lineno=1''' % case)
return 40, marker, plt.cm.Greys(1.0 - (income_code/16.0))
np.set_printoptions(precision=2)
case_vectors = []
for case in random_cases:
case_vectors.append((case,))
dists = scipy.spatial.distance.pdist(
case_vectors, case_distance_2
)
dists = scipy.spatial.distance.squareform(dists)
#print scipy.spatial.distance.squareform(dists)
mds = manifold.MDS(2, dissimilarity="precomputed")
Y = mds.fit_transform(dists)
for i in xrange(len(Y)):
size, marker, color = look(case_vectors[i])
plt.scatter(Y[i, 0], Y[i, 1], c=color, s=size, marker=marker)
plt.title("MDS embedding")
plt.show() | mit |
aferr/TemporalPartitioningMemCtl | util/stats/output.py | 90 | 7981 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from chart import ChartOptions
class StatOutput(ChartOptions):
def __init__(self, jobfile, info, stat=None):
super(StatOutput, self).__init__()
self.jobfile = jobfile
self.stat = stat
self.invert = False
self.info = info
    def display(self, name, printmode = 'G'):
        import info
        for job in self.jobfile.jobs():
            value = self.info.get(job, self.stat)
            if value is None:
                return
            if not isinstance(value, list):
                value = [ value ]
            if self.invert:
                for i,val in enumerate(value):
                    if val != 0.0:
                        value[i] = 1 / val
            # pick the format string per job, once the (possibly inverted)
            # values are actually known
            if printmode == 'G':
                valformat = '%g'
            elif printmode != 'F' and max(value) > 1e6:
                valformat = '%0.5e'
            else:
                valformat = '%f'
            valstring = ', '.join([ valformat % val for val in value ])
            print '%-50s %s' % (job.name + ':', valstring)
def graph(self, name, graphdir, proxy=None):
from os.path import expanduser, isdir, join as joinpath
from barchart import BarChart
from matplotlib.numerix import Float, array, zeros
import os, re, urllib
from jobfile import crossproduct
confgroups = self.jobfile.groups()
ngroups = len(confgroups)
skiplist = [ False ] * ngroups
groupopts = []
baropts = []
groups = []
for i,group in enumerate(confgroups):
if group.flags.graph_group:
groupopts.append(group.subopts())
skiplist[i] = True
elif group.flags.graph_bars:
baropts.append(group.subopts())
skiplist[i] = True
else:
groups.append(group)
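        # Option groups flagged graph_group supply the x-axis groups and those
        # flagged graph_bars supply the bars within each group; every
        # combination of the remaining options produces its own chart (and
        # <img> entry) in the generated html page.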
has_group = bool(groupopts)
if has_group:
groupopts = [ group for group in crossproduct(groupopts) ]
else:
groupopts = [ None ]
if baropts:
baropts = [ bar for bar in crossproduct(baropts) ]
else:
raise AttributeError, 'No group selected for graph bars'
directory = expanduser(graphdir)
if not isdir(directory):
os.mkdir(directory)
html = file(joinpath(directory, '%s.html' % name), 'w')
print >>html, '<html>'
print >>html, '<title>Graphs for %s</title>' % name
print >>html, '<body>'
html.flush()
for options in self.jobfile.options(groups):
chart = BarChart(self)
data = [ [ None ] * len(baropts) for i in xrange(len(groupopts)) ]
enabled = False
stacked = 0
for g,gopt in enumerate(groupopts):
for b,bopt in enumerate(baropts):
if gopt is None:
gopt = []
job = self.jobfile.job(options + gopt + bopt)
if not job:
continue
if proxy:
import db
proxy.dict['system'] = self.info[job.system]
val = self.info.get(job, self.stat)
if val is None:
print 'stat "%s" for job "%s" not found' % \
(self.stat, job)
if isinstance(val, (list, tuple)):
if len(val) == 1:
val = val[0]
else:
stacked = len(val)
data[g][b] = val
if stacked == 0:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
if data[i][j] is None:
data[i][j] = 0.0
else:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
val = data[i][j]
if val is None:
data[i][j] = [ 0.0 ] * stacked
elif len(val) != stacked:
raise ValueError, "some stats stacked, some not"
data = array(data)
if data.sum() == 0:
continue
dim = len(data.shape)
x = data.shape[0]
xkeep = [ i for i in xrange(x) if data[i].sum() != 0 ]
y = data.shape[1]
ykeep = [ i for i in xrange(y) if data[:,i].sum() != 0 ]
data = data.take(xkeep, axis=0)
data = data.take(ykeep, axis=1)
if not has_group:
data = data.take([ 0 ], axis=0)
chart.data = data
bopts = [ baropts[i] for i in ykeep ]
bdescs = [ ' '.join([o.desc for o in opt]) for opt in bopts]
if has_group:
gopts = [ groupopts[i] for i in xkeep ]
gdescs = [ ' '.join([o.desc for o in opt]) for opt in gopts]
if chart.legend is None:
if stacked:
try:
chart.legend = self.info.rcategories
except:
chart.legend = [ str(i) for i in xrange(stacked) ]
else:
chart.legend = bdescs
if chart.xticks is None:
if has_group:
chart.xticks = gdescs
else:
chart.xticks = []
chart.graph()
names = [ opt.name for opt in options ]
descs = [ opt.desc for opt in options ]
if names[0] == 'run':
names = names[1:]
descs = descs[1:]
basename = '%s-%s' % (name, ':'.join(names))
desc = ' '.join(descs)
pngname = '%s.png' % basename
            psname = '%s.ps' % re.sub(':', '-', basename)
            epsname = '%s.eps' % re.sub(':', '-', basename)
chart.savefig(joinpath(directory, pngname))
chart.savefig(joinpath(directory, epsname))
chart.savefig(joinpath(directory, psname))
html_name = urllib.quote(pngname)
print >>html, '''%s<br><img src="%s"><br>''' % (desc, html_name)
html.flush()
print >>html, '</body>'
print >>html, '</html>'
html.close()
| bsd-3-clause |
yask123/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
idlead/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
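# kernprof only line-profiles functions carrying the @profile decorator
# ('profile' is injected as a builtin when run via kernprof.py -l), as shown in
# the docstring above; for a rough comparison without kernprof, plain
# wall-clock timing also works, e.g.:
#   import time
#   t0 = time.time(); benchmark_dense_predict(); print(time.time() - t0)
#   t0 = time.time(); benchmark_sparse_predict(); print(time.time() - t0)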
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
LohithBlaze/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
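# For reference, a call such as benchmark(PCA(n_components=32), X) returns a
# dict of the form {'time': <fit seconds>, 'error': <mean absolute
# reconstruction error>}; the n_components value here is only illustrative.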
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
smeerten/ssnake | src/ssNake.py | 1 | 398261 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 - 2020 Bas van Meerten and Wouter Franssen
# This file is part of ssNake.
#
# ssNake is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ssNake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ssNake. If not, see <http://www.gnu.org/licenses/>.
EXE = False
import sys
if sys.version_info.major == 2:
import sip
sip.setapi('QString', 2)
print('DEPRECATION WARNING: From version 1.4 onwards, python2 is no longer supported. Consider upgrading to python3.')
import os
import importlib
try:
from PyQt5 import QtGui, QtCore, QtWidgets
QT = 5
except ImportError:
from PyQt4 import QtGui, QtCore
from PyQt4 import QtGui as QtWidgets
QT = 4
print('DEPRECATION WARNING: From version 1.4 onwards, PyQt4 is no longer supported. Consider upgrading to PyQt5.')
QtCore.pyqtRemoveInputHook()
import matplotlib
# First import matplotlib and Qt
if QT == 4:
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
else:
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import multiprocessing
# Create splash window
if __name__ == '__main__':
multiprocessing.freeze_support() #Fix multiprocessing for pyinstaller on windows (line does nothing otherwise)
root = QtWidgets.QApplication(sys.argv)
root.setWindowIcon(QtGui.QIcon(os.path.dirname(os.path.realpath(__file__)) + '/Icons/logo.gif'))
splash_pix = QtGui.QPixmap(os.path.dirname(os.path.realpath(__file__)) + '/Icons/logo.gif')
splash = QtWidgets.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
splash.setMask(splash_pix.mask())
progressBar = QtWidgets.QProgressBar(splash)
progressBar.setGeometry(int(2.5 * splash.width() / 10.0), int(0.89 * splash.height()), int(5 * splash.width() / 10.0), int(splash.height() / 20.0))
splash.show()
def splashProgressStep(splashStep): # A function to easily increase the progressbar value
if __name__ == '__main__':
splashStep = splashStep + 1
progressBar.setValue(int(splashStep // splashSteps + (splashStep % splashSteps > 0))) # Rounds up without math or numpy module
root.processEvents()
return splashStep
def import_lib(name, nameAs, className, splashStep):
# Function to load a library from string names
if className is None:
globals()[nameAs] = importlib.import_module(name)
else:
mod = importlib.import_module(name)
globals()[nameAs] = getattr(mod, className)
return splashProgressStep(splashStep)
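# For example, import_lib('numpy', 'np', None, splashStep) behaves like
# 'import numpy as np', while import_lib('safeEval', 'safeEval', 'safeEval',
# splashStep) behaves like 'from safeEval import safeEval'; both also advance
# the splash-screen progress bar.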
# List of all libs to be imported:
# [name, name to be saved as, import specific class]
importList = [['matplotlib.figure', 'Figure', 'Figure'],
['traceback', 'tb', None],
['numpy', 'np', None],
['copy', 'copy', None],
['gc', 'gc', None],
['multiprocessing', 'multiprocessing', None],
['datetime', 'datetime', None],
['webbrowser', 'webbrowser', None],
['spectrum', 'sc', None],
['hypercomplex', 'hc', None],
['fitting', 'fit', None],
['safeEval', 'safeEval', 'safeEval'],
['widgetClasses', 'wc', None],
['updateWindow', 'UpdateWindow', 'UpdateWindow'],
['saveFigure', 'SaveFigureWindow', 'SaveFigureWindow'],
['functions', 'func', None],
['specIO', 'io', None],
['views', 'views', None],
['simFunctions', 'sim', None],
['loadIsotopes', 'loadIsotopes', None],
['scipy', 'optimize', 'optimize']]
splashSteps = len(importList) / 100.0
splashStep = 0
# Import everything else
for elem in importList:
splashStep = import_lib(elem[0], elem[1], elem[2], splashStep)
isoPath = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + "IsotopeProperties"
ISOTOPES = loadIsotopes.getIsotopeInfo(isoPath)
matplotlib.rc('font', family='DejaVu Sans')
np.set_printoptions(threshold=sys.maxsize)
QtCore.QLocale.setDefault(QtCore.QLocale('en_US'))
VERSION = 'v1.3'
# Required library version
NPVERSION = '1.11.0'
MPLVERSION = '1.5.0'
SPVERSION = '0.14.1'
PY2VERSION = '2.7'
PY3VERSION = '3.4'
def splitString(val, size):
#Split a string at spaces, with maxsize 'size'
total = ''
while len(val) > size:
tmp = val[0:size]
space = tmp.rfind(' ')
total = total + val[0:space] + '\n'
val = val[space + 1::] #increment string. +1 to cut off the space
total = total + val
return total
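# For example, splitString('one two three', 8) returns 'one two\nthree': the
# string is broken at the last space that keeps each piece within 'size'
# characters.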
#Prepare TOOLTIPS dictionary
TOOLTIPS = dict()
with open(os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'Tooltips', 'r') as f:
data = f.read().split('\n')
for line in data:
if line:
tmp = line.split('\t')
if len(tmp) > 2:
text = tmp[1] + '\n\n' + splitString(tmp[2], 80) #Header plus split main text
else:
text = tmp[1]
TOOLTIPS[tmp[0]] = text
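# Each line of the Tooltips file is expected to hold tab-separated fields: a
# widget key, a short header, and optionally a longer description, which is
# wrapped at 80 characters by splitString above.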
class SsnakeException(sc.SpectrumException):
pass
class MainProgram(QtWidgets.QMainWindow):
def __init__(self, root):
super(MainProgram, self).__init__()
self.root = root
self.VERSION = VERSION
self.errors = []
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setAcceptDrops(True)
self.mainWindow = None
self.workspaces = []
self.workspaceNames = []
self.workspaceNum = 0
self.macros = {}
self.macroActions = {}
self.referenceName = [] # List with saved reference names
self.referenceValue = [] # List with saved reference values
self.referenceActions = {}
self.loadDefaults()
if self.defaultStartupBool:
self.lastLocation = os.path.expanduser(self.defaultStartupDir)
else:
if EXE:
self.lastLocation = os.path.expanduser('~')
else:
self.lastLocation = os.getcwd()
if not self.defaultTooltips: #Disable tooltips by setting them to empty strings
for elem in TOOLTIPS.keys():
TOOLTIPS[elem] = ''
self.initMenu()
self.menuCheck()
self.main_widget = QtWidgets.QSplitter(self)
self.main_widget.setHandleWidth(10)
self.gridWidget = QtWidgets.QWidget(self)
self.mainFrame = QtWidgets.QGridLayout(self.gridWidget)
self.tree = wc.SsnakeTreeWidget(self)
self.main_widget.addWidget(self.tree)
self.main_widget.addWidget(self.gridWidget)
self.logo = QtWidgets.QLabel(self)
self.logo.setPixmap(QtGui.QPixmap(os.path.dirname(os.path.realpath(__file__)) + "/Icons/logo.gif"))
self.mainFrame.addWidget(self.logo, 0, 0, QtCore.Qt.AlignCenter)
self.tabs = wc.SsnakeTabs(self)
self.tabs.setMovable(True)
self.tabs.tabBar().tabMoved.connect(self.moveWorkspace)
self.allowChange = True
self.tabs.setTabsClosable(True)
self.tabs.currentChanged.connect(self.changeMainWindow)
self.tabs.tabCloseRequested.connect(self.destroyWorkspace)
self.mainFrame.addWidget(self.tabs, 0, 0)
self.statusBar = QtWidgets.QStatusBar(self)
self.setStatusBar(self.statusBar)
self.tabs.hide()
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
self.eventFilter = wc.SsnakeEventFilter(self)
self.root.installEventFilter(self.eventFilter)
self.initToolbar()
self.main_widget.setStretchFactor(1, 10)
#Set double click filter for splitter
self.splitterEventFilter = wc.SplitterEventFilter(self.main_widget)
self.main_widget.handle(1).installEventFilter(self.splitterEventFilter)
self.resize(self.defaultWidth, self.defaultHeight)
if self.defaultMaximized:
self.showMaximized()
QtWidgets.QShortcut(QtGui.QKeySequence.Paste, self).activated.connect(self.handlePaste)
QtWidgets.QShortcut(QtGui.QKeySequence.Copy, self).activated.connect(self.handleCopy)
def dispError(self, error):
self.dispMsg("Program error. Please report.", color="red")
CurTime = datetime.datetime.now()
TimeStr = '{0:02d}'.format(CurTime.hour) + ':' + '{0:02d}'.format(CurTime.minute) + ':' + '{0:02d}'.format(CurTime.second)
self.errors.append([TimeStr, error])
def handlePaste(self):
self.dropEvent(QtWidgets.QApplication.instance().clipboard())
def handleCopy(self):
""" Makes a pixelwise copy of the currently viewed canvas """
if self.mainWindow is None:
return
if issubclass(type(self.mainWindow), fit.TabFittingWindow): #If fitting, take canvas from current tab
canvas = self.mainWindow.tabs.currentWidget().canvas
else:
canvas = self.mainWindow.canvas
if QT == 5:
screen = self.root.primaryScreen()
pixmap = screen.grabWindow(canvas.winId())
else:
pixmap = QtGui.QPixmap.grabWidget(canvas)
QtWidgets.QApplication.clipboard().setPixmap(pixmap)
def resetDefaults(self):
self.defaultUnits = 1
self.defaultShowTitle = True
self.defaultPPM = False
self.defaultWidth = 1
self.defaultHeight = 1
self.defaultMaximized = False
self.defaultAskName = True
self.defaultToolBar = True
self.defaultLinewidth = 1.0
self.defaultMinXTicks = 12
self.defaultMinYTicks = 8
self.defaultColor = '#1F77B4'
self.defaultGrids = [False, False]
self.defaultDiagonalBool = False
self.defaultDiagonalMult = 1
self.defaultZeroScroll = True
self.defaultZoomStep = 1
self.defaultColorRange = 'none'
self.defaultColorMap = 'seismic'
self.defaultPColorMap = 'gray'
self.defaultWidthRatio = 3.0
self.defaultHeightRatio = 3.0
self.defaultContourConst = True
self.defaultPosColor = '#1F77B4'
self.defaultNegColor = '#FF7F0E'
self.defaultStartupBool = False
self.defaultStartupDir = '~'
self.defaultTooltips = True
self.defaultToolbarActionList = ['File --> Open',
'File -- > Save --> Matlab',
'File --> Export --> Figure',
'Seperator',
'Workspaces --> Duplicate',
'Workspaces --> Delete',
'Seperator',
'Edit --> Undo',
'Edit --> Redo',
'Edit --> Reload',
'Seperator', 'Tools --> Apodize',
'Tools --> Phasing --> Phase',
'Tools --> Phasing --> Autophase 0',
'Seperator',
'Matrix --> Sizing',
'Matrix --> Shift Data',
'Matrix --> Multiply',
'Seperator',
'Fitting --> S/N',
'Fitting --> FWHM',
'Fitting --> Integrals',
'Fitting --> Relaxation Curve',
'Fitting --> Lorentzian/Gaussian',
'Seperator',
'Plot --> 1D Plot',
'Plot --> Stack Plot',
'Plot --> Array Plot',
'Plot --> Contour Plot',
'Plot --> Multi Plot',
'Seperator',
'History --> History',
'History --> Clear Undo/Redo List',
'Seperator',
'Utilities --> NMR Table']
def loadDefaults(self):
self.resetDefaults()
QtCore.QSettings.setDefaultFormat(QtCore.QSettings.IniFormat)
QtCore.QCoreApplication.setOrganizationName("ssNake")
QtCore.QCoreApplication.setApplicationName("ssNake")
settings = QtCore.QSettings()
try:
self.defaultUnits = settings.value("plot/units", self.defaultUnits, int)
except TypeError:
self.dispMsg("Incorrect value in the config file for the units")
self.defaultPPM = settings.value("plot/ppm", self.defaultPPM, bool)
self.defaultToolbarActionList = settings.value("toolbarList", self.defaultToolbarActionList, str)
self.defaultShowTitle = settings.value("plot/showTitle", self.defaultShowTitle, bool)
self.defaultColor = settings.value("plot/colour", self.defaultColor, str)
self.defaultColorRange = settings.value("plot/colourrange", self.defaultColorRange, str)
if not str(self.defaultColorRange) in views.COLORRANGELIST:
self.dispMsg("Incorrect colourrange in config file")
self.defaultColorRange = views.COLORRANGELIST[0]
try:
self.defaultLinewidth = settings.value("plot/linewidth", self.defaultLinewidth, float)
except TypeError:
self.dispMsg("Incorrect value in the config file for the plot/linewidth")
self.defaultMinXTicks = settings.value("plot/minXTicks", self.defaultMinXTicks, int)
self.defaultMinYTicks = settings.value("plot/minYTicks", self.defaultMinYTicks, int)
self.defaultGrids = [settings.value("plot/xgrid", self.defaultGrids[0], bool), settings.value("plot/ygrid", self.defaultGrids[1], bool)]
self.defaultZeroScroll = settings.value("plot/zeroscroll", self.defaultZeroScroll, bool)
self.defaultZoomStep = settings.value("plot/zoomstep", self.defaultZoomStep, float)
self.defaultColorMap = settings.value("contour/colourmap", self.defaultColorMap, str)
self.defaultContourConst = settings.value("contour/constantcolours", self.defaultContourConst, bool)
self.defaultPosColor = settings.value("contour/poscolour", self.defaultPosColor, str)
self.defaultNegColor = settings.value("contour/negcolour", self.defaultNegColor, str)
self.defaultPColorMap = settings.value("2Dcolor/colourmap", self.defaultPColorMap, str)
if not str(self.defaultColorMap) in views.COLORMAPLIST:
self.dispMsg("Incorrect colourmap in config file")
self.defaultColorMap = views.COLORMAPLIST[0]
if not str(self.defaultPColorMap) in views.COLORMAPLIST:
self.dispMsg("Incorrect pcolourmap in config file")
self.defaultPColorMap = views.COLORMAPLIST[0]
self.defaultDiagonalBool = settings.value("contour/diagonalbool", self.defaultDiagonalBool, bool)
try:
self.defaultDiagonalMult = settings.value("contour/diagonalmult", self.defaultDiagonalMult, float)
except TypeError:
self.dispMsg("Incorrect value in the config file for the diagonal multiplier")
self.defaultMaximized = settings.value("maximized", self.defaultMaximized, bool)
try:
self.defaultWidth = settings.value("width", self.defaultWidth, int)
except TypeError:
self.dispMsg("Incorrect value in the config file for the width")
try:
self.defaultHeight = settings.value("height", self.defaultHeight, int)
except TypeError:
self.dispMsg("Incorrect value in the config file for the height")
self.defaultAskName = settings.value("ask_name", self.defaultAskName, bool)
self.defaultToolBar = settings.value("toolbar", self.defaultToolBar, bool)
self.defaultStartupBool = settings.value("startupdiron", self.defaultStartupBool, bool)
self.defaultStartupDir = settings.value("startupdir", self.defaultStartupDir, str)
self.defaultTooltips = settings.value("tooltips", self.defaultTooltips, bool)
try:
self.defaultWidthRatio = settings.value("contour/width_ratio", self.defaultWidthRatio, float)
except TypeError:
self.dispMsg("Incorrect value in the config file for the contour/width_ratio")
try:
self.defaultHeightRatio = settings.value("contour/height_ratio", self.defaultHeightRatio, float)
except TypeError:
self.dispMsg("Incorrect value in the config file for the contour/height_ratio")
def saveDefaults(self):
QtCore.QSettings.setDefaultFormat(QtCore.QSettings.IniFormat)
QtCore.QCoreApplication.setOrganizationName("ssNake")
QtCore.QCoreApplication.setApplicationName("ssNake")
settings = QtCore.QSettings()
settings.setValue("plot/showTitle", self.defaultShowTitle)
settings.setValue("plot/units", self.defaultUnits)
settings.setValue("plot/ppm", self.defaultPPM)
settings.setValue('toolbarList', self.defaultToolbarActionList)
settings.setValue("plot/colour", self.defaultColor)
settings.setValue("plot/colourrange", self.defaultColorRange)
settings.setValue("plot/linewidth", self.defaultLinewidth)
settings.setValue("plot/minXTicks", self.defaultMinXTicks)
settings.setValue("plot/minYTicks", self.defaultMinYTicks)
settings.setValue("plot/xgrid", self.defaultGrids[0])
settings.setValue("plot/ygrid", self.defaultGrids[1])
settings.setValue("plot/zeroscroll", self.defaultZeroScroll)
settings.setValue("plot/zoomstep", self.defaultZoomStep)
settings.setValue("maximized", self.defaultMaximized)
settings.setValue("width", self.defaultWidth)
settings.setValue("height", self.defaultHeight)
settings.setValue("ask_name", self.defaultAskName)
settings.setValue("toolbar", self.defaultToolBar)
settings.setValue("startupdiron", self.defaultStartupBool)
settings.setValue("startupdir", self.defaultStartupDir)
settings.setValue("tooltips", self.defaultTooltips)
settings.setValue("contour/colourmap", self.defaultColorMap)
settings.setValue("contour/constantcolours", self.defaultContourConst)
settings.setValue("contour/poscolour", self.defaultPosColor)
settings.setValue("contour/negcolour", self.defaultNegColor)
settings.setValue("contour/width_ratio", self.defaultWidthRatio)
settings.setValue("contour/height_ratio", self.defaultHeightRatio)
settings.setValue("contour/diagonalbool", self.defaultDiagonalBool)
settings.setValue("contour/diagonalmult", self.defaultDiagonalMult)
settings.setValue("2Dcolor/colourmap", self.defaultPColorMap)
def dispMsg(self, msg, color='black'):
if color == 'red':
self.statusBar.setStyleSheet("QStatusBar{padding-left:8px;color:red;}")
else:
self.statusBar.setStyleSheet("QStatusBar{padding-left:8px;}")
self.statusBar.showMessage(msg, 10000)
def initToolbar(self):
if self.defaultToolBar:
self.toolbar = self.addToolBar('Toolbar')
self.toolbar.setMovable(False)
self.toolbar.setIconSize(QtCore.QSize(22, 22))
self.toolbar.toggleViewAction().setEnabled(False)
self.seperatorAction = []
self.allActionsList = [['Seperator', None],
['File --> Open', self.openAct],
['File --> Save --> JSON', self.saveAct],
['File -- > Save --> Matlab', self.saveMatAct],
['File --> Export --> Figure', self.savefigAct],
['File --> Export --> Simpson', self.saveSimpsonAct],
['File --> Export --> ASCII (1D/2D)', self.saveASCIIAct],
['File --> Preferences', self.preferencesAct],
['File --> Quit', self.quitAct],
['Workspaces --> Duplicate', self.newAct],
['Workspaces --> Slice to Workspace', self.newSlice],
['Workspaces --> Delete', self.closeAct],
['Workspaces --> Rename', self.renameWorkspaceAct],
['Workspaces --> Next', self.forwardAct],
['Workspaces --> Previous', self.backAct],
['Workspaces --> Info', self.workInfoAct],
['Macro --> Start Recording', self.macrostartAct],
['Macro --> Stop Recording', self.macrostopAct],
['Macro --> Load', self.macroLoadAct],
['Edit --> Undo', self.undoAction],
['Edit --> Redo', self.redoAction],
['Edit --> Reload', self.reloadAct],
['Edit --> Monitor', self.monitorAct],
['Tools --> Real', self.realAct],
['Tools --> Imag', self.imagAct],
['Tools --> Abs', self.absAct],
['Tools --> Complex Conjugate', self.conjAct],
['Tools --> Apodize', self.apodizeAct],
['Tools --> Phasing --> Phase', self.phaseAct],
['Tools --> Phasing --> Autophase 0', self.autoPhaseAct0],
['Tools --> Phasing --> Autophase 0+1', self.autoPhaseAct1],
['Tools --> Phasing --> Autophase per trace 0', self.autoPhaseAllAct0],
['Tools --> Phasing --> Autophase per trace 0+1', self.autoPhaseAllAct1],
['Tools --> Swap Echo', self.swapEchoAct],
['Tools --> Offset Correction', self.corOffsetAct],
['Tools --> Baseline Correction', self.baselineAct],
['Tools --> Subtract Averages', self.subAvgAct],
['Tools --> Reference Deconvolution', self.refDeconvAct],
['Tools --> Correct Digital Filter', self.digitalFilterAct],
['Tools --> Scale SW', self.scaleSWAct],
['Tools --> LPSVD', self.lpsvdAct],
['Matrix --> Sizing', self.sizingAct],
['Matrix --> Shift Data', self.shiftAct],
['Matrix --> Roll Data', self.rollAct],
['Matrix --> Align Maxima', self.alignAct],
['Matrix --> Multiply', self.multiplyAct],
['Matrix --> Normalize', self.normalizeAct],
['Matrix --> Region --> Integrate', self.intRegionAct],
['Matrix --> Region --> Sum', self.sumRegionAct],
['Matrix --> Region --> Max', self.maxRegionAct],
['Matrix --> Region --> Min', self.minRegionAct],
['Matrix --> Region --> Max Position', self.maxposRegionAct],
['Matrix --> Region --> Min Position', self.minposRegionAct],
['Matrix --> Region --> Average', self.averageRegionAct],
['Matrix --> Diff', self.diffAct],
['Matrix --> Cumsum', self.cumsumAct],
['Matrix --> Extract Part', self.extractpartAct],
['Matrix --> Flip L/R', self.fliplrAct],
['Matrix --> Delete', self.matrixdelAct],
['Matrix --> Split', self.splitAct],
['Matrix --> Reorder', self.reorderAct],
['Matrix --> Regrid', self.regridAct],
['Matrix --> Concatenate', self.concatAct],
['Matrix --> Shearing', self.shearAct],
['Transforms --> Fourier Transform', self.fourierAct],
['Transforms --> Real Fourier Transform', self.realFourierAct],
['Transforms --> Fftshift', self.fftshiftAct],
['Transforms --> Inv fftshift', self.invfftshiftAct],
['Transforms --> Hilbert Transform', self.hilbertAct],
['Transforms --> NUS --> FFM', self.ffmAct],
['Transforms --> NUS --> CLEAN', self.cleanAct],
['Transforms --> NUS --> IST', self.istAct],
['Transforms --> Hypercomplex --> States', self.statesAct],
['Transforms --> Hypercomplex --> TPPI', self.statesTPPIAct],
['Transforms --> Hypercomplex --> Echo-antiecho', self.echoantiAct],
['Fitting --> S/N', self.snrAct],
['Fitting --> FWHM', self.fwhmAct],
['Fitting --> Centre of Mass', self.massAct],
['Fitting --> Integrals', self.intfitAct],
['Fitting --> Relaxation Curve', self.relaxAct],
['Fitting --> Diffusion Curve', self.diffusionAct],
['Fitting --> Lorentzian/Gaussian', self.lorentzfitAct],
['Fitting --> CSA', self.csastaticAct],
['Fitting --> Quadrupole', self.quadAct],
['Fitting --> Quadrupole+CSA', self.quadCSAAct],
['Fitting --> Czjzek', self.czjzekAct],
['Fitting --> MQMAS', self.mqmasAct],
['Fitting --> Czjzek MQMAS', self.mqmasCzjzekAct],
['Fitting --> External', self.externalFitAct],
['Fitting --> Function', self.functionFitAct],
['Combine --> Combine Workspaces', self.combineWorkspaceAct],
['Combine --> Insert From Workspace', self.insertdatAct],
['Combine --> Add', self.adddatAct],
['Combine --> Subtract', self.subdatAct],
['Combine --> Multiply', self.multdatAct],
['Combine --> Divide', self.divdatAct],
['Plot --> 1D Plot', self.onedplotAct],
['Plot --> Scatter', self.scatterplotAct],
['Plot --> Stack Plot', self.stackplotAct],
['Plot --> Array Plot', self.arrayplotAct],
['Plot --> Contour Plot', self.contourplotAct],
['Plot --> 2D Colour Plot', self.colour2DplotAct],
['Plot --> Multi Plot', self.multiplotAct],
['Plot --> Set Reference', self.setrefAct],
['Plot --> Clear Current Reference', self.delrefAct],
['Plot --> Load Reference', self.loadrefAct],
['Plot --> User X-axis', self.userxAct],
['Plot --> Plot Settings', self.plotprefAct],
['History --> History', self.historyAct],
['History --> Clear Undo/Redo List', self.clearundoAct],
['Utilities --> Chemical Shift Conversion Tool', self.shiftconvAct],
['Utilities --> Dipolar Distance Tool', self.dipolarconvAct],
['Utilities --> Quadrupole Coupling Conversion Tool', self.quadconvAct],
['Utilities --> NMR Table', self.nmrtableAct],
['Help --> GitHub Page', self.githubAct],
['Help --> ssNake Tutorials', self.tutorialAct],
['Help --> About', self.aboutAct]]
for element in self.defaultToolbarActionList:
if element == 'Seperator':
self.seperatorAction.append(QtWidgets.QAction(self))
self.seperatorAction[-1].setSeparator(True)
self.toolbar.addAction(self.seperatorAction[-1])
else:
for action in self.allActionsList:
if element == action[0]:
self.toolbar.addAction(action[1])
def initMenu(self):
IconDirectory = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'Icons' + os.path.sep
self.menubar = self.menuBar()
self.filemenu = QtWidgets.QMenu('&File', self)
self.menubar.addMenu(self.filemenu)
self.openAct = self.filemenu.addAction(QtGui.QIcon(IconDirectory + 'open.png'), '&Open', self.loadFromMenu, QtGui.QKeySequence.Open)
self.openAct.setToolTip('Open a File')
self.combineLoadAct = self.filemenu.addAction(QtGui.QIcon(IconDirectory + 'combine.png'), '&Open && Combine', self.createCombineLoadWindow)
self.combineLoadAct.setToolTip('Open and Combine Multiple Files')
self.savemenu = QtWidgets.QMenu('&Save', self)
self.filemenu.addMenu(self.savemenu)
self.saveAct = self.savemenu.addAction(QtGui.QIcon(IconDirectory + 'JSON.png'), 'JSON', self.saveJSONFile, QtGui.QKeySequence.Save)
self.saveAct.setToolTip('Save as JSON File')
self.saveMatAct = self.savemenu.addAction(QtGui.QIcon(IconDirectory + 'Matlab.png'), 'MATLAB', self.saveMatlabFile)
self.saveMatAct.setToolTip('Save as MATLAB File')
self.exportmenu = QtWidgets.QMenu('&Export', self)
self.filemenu.addMenu(self.exportmenu)
self.savefigAct = self.exportmenu.addAction(QtGui.QIcon(IconDirectory + 'figure.png'), 'Figure', self.saveFigure, QtGui.QKeySequence.Print)
self.savefigAct.setToolTip('Export as Figure')
self.saveSimpsonAct = self.exportmenu.addAction(QtGui.QIcon(IconDirectory + 'simpson.png'), 'Simpson', self.saveSimpsonFile)
self.saveSimpsonAct.setToolTip('Export as Simpson File')
self.saveASCIIAct = self.exportmenu.addAction(QtGui.QIcon(IconDirectory + 'ASCII.png'), 'ASCII (1D/2D)', self.saveASCIIFile)
self.saveASCIIAct.setToolTip('Save as ASCII Text File')
self.preferencesAct = self.filemenu.addAction(QtGui.QIcon(IconDirectory + 'preferences.png'), '&Preferences', lambda: PreferenceWindow(self))
self.preferencesAct.setToolTip('Open Preferences Window')
self.quitAct = self.filemenu.addAction(QtGui.QIcon(IconDirectory + 'quit.png'), '&Quit', self.fileQuit, QtGui.QKeySequence.Quit)
self.quitAct.setToolTip('Close ssNake')
self.saveActList = [self.saveAct, self.saveMatAct]
self.exportActList = [self.savefigAct, self.saveSimpsonAct, self.saveASCIIAct]
self.fileActList = [self.openAct, self.saveAct, self.saveMatAct,
self.savefigAct, self.saveSimpsonAct, self.saveASCIIAct,
self.combineLoadAct, self.preferencesAct, self.quitAct]
# Workspaces menu
self.workspacemenu = QtWidgets.QMenu('&Workspaces', self)
self.menubar.addMenu(self.workspacemenu)
self.newAct = self.workspacemenu.addAction(QtGui.QIcon(IconDirectory + 'duplicate.png'), 'D&uplicate', self.duplicateWorkspace, QtGui.QKeySequence.New)
self.newAct.setToolTip('Duplicate Workspace')
self.newSlice = self.workspacemenu.addAction(QtGui.QIcon(IconDirectory + 'duplicate.png'), 'Slice to Workspace', lambda: self.duplicateWorkspace(sliceOnly=True))
self.newSlice.setToolTip('Copy Current Slice to New Workspace')
self.closeAct = self.workspacemenu.addAction(QtGui.QIcon(IconDirectory + 'delete.png'), '&Delete', self.destroyWorkspace, QtGui.QKeySequence.Close)
self.closeAct.setToolTip('Delete Workspace')
self.renameWorkspaceAct = self.workspacemenu.addAction(QtGui.QIcon(IconDirectory + 'rename.png'), '&Rename', self.renameWorkspace, QtCore.Qt.Key_F2)
self.renameWorkspaceAct.setToolTip('Rename Workspace')
self.activemenu = QtWidgets.QMenu('&Go to', self)
self.workspacemenu.addMenu(self.activemenu)
self.forwardAct = self.workspacemenu.addAction(QtGui.QIcon(IconDirectory + 'next.png'), '&Next', lambda: self.stepWorkspace(1), QtGui.QKeySequence.Forward)
self.forwardAct.setToolTip('Next Workspace')
self.backAct = self.workspacemenu.addAction(QtGui.QIcon(IconDirectory + 'previous.png'), '&Previous', lambda: self.stepWorkspace(-1), QtGui.QKeySequence.Back)
self.backAct.setToolTip('Previous Workspace')
self.workInfoAct = self.workspacemenu.addAction(QtGui.QIcon(IconDirectory + 'about.png'), '&Info', lambda: self.mainWindowCheck(lambda mainWindow: WorkInfoWindow(mainWindow)))
self.workInfoAct.setToolTip('Workspace Information')
self.workspaceActList = [self.newAct, self.newSlice, self.closeAct, self.renameWorkspaceAct,
self.forwardAct, self.backAct, self.workInfoAct]
# Macro menu
self.macromenu = QtWidgets.QMenu('&Macros', self)
self.menubar.addMenu(self.macromenu)
self.macrostartAct = self.macromenu.addAction(QtGui.QIcon(IconDirectory + 'record.png'), 'St&art Recording', self.macroCreate)
self.macrostartAct.setToolTip('Start Recording Macro')
self.macrostopAct = self.macromenu.addAction(QtGui.QIcon(IconDirectory + 'stop.png'), 'St&op Recording', self.stopMacro)
self.macrostopAct.setToolTip('Stop Recording Macro')
self.macrolistmenu = QtWidgets.QMenu('&Run', self)
self.macromenu.addMenu(self.macrolistmenu)
self.macrorenamemenu = QtWidgets.QMenu('Re&name', self)
self.macromenu.addMenu(self.macrorenamemenu)
self.macrodeletemenu = QtWidgets.QMenu('&Delete', self)
self.macromenu.addMenu(self.macrodeletemenu)
self.macrosavemenu = QtWidgets.QMenu('&Save', self)
self.macromenu.addMenu(self.macrosavemenu)
self.macroLoadAct = self.macromenu.addAction(QtGui.QIcon(IconDirectory + 'open.png'), '&Load', self.loadMacro)
self.macroLoadAct.setToolTip('Load Macro')
self.macroActList = [self.macrostartAct, self.macrostopAct]
self.multiDActions = []
# the edit drop down menu
self.editmenu = QtWidgets.QMenu("&Edit", self)
self.menubar.addMenu(self.editmenu)
self.undoAction = self.editmenu.addAction(QtGui.QIcon(IconDirectory + 'undo.png'), "&Undo", self.undo, QtGui.QKeySequence.Undo)
self.undoAction.setShortcutContext(QtCore.Qt.WidgetShortcut)
self.undoAction.setToolTip('Undo')
self.redoAction = self.editmenu.addAction(QtGui.QIcon(IconDirectory + 'redo.png'), "&Redo", self.redo, QtGui.QKeySequence.Redo)
self.redoAction.setShortcutContext(QtCore.Qt.WidgetShortcut)
self.redoAction.setToolTip('Redo')
self.noUndoAct = QtWidgets.QAction("&No Undo Mode", self.editmenu, checkable=True)
self.noUndoAct.toggled.connect(self.noUndoMode)
self.editmenu.addAction(self.noUndoAct)
self.clearundoAct = self.editmenu.addAction(QtGui.QIcon(IconDirectory + 'delete.png'), "&Clear Undo/Redo List", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.clearUndo()))
self.clearundoAct.setToolTip('Clear Undo/Redo List')
self.reloadAct = self.editmenu.addAction(QtGui.QIcon(IconDirectory + 'reload.png'), "Re&load", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.reloadLast()), QtGui.QKeySequence.Refresh)
self.reloadAct.setToolTip('Reload Current Data')
self.monitorAct = self.editmenu.addAction(QtGui.QIcon(IconDirectory + 'monitor.png'), "&Monitor", lambda: self.mainWindowCheck(lambda mainWindow: MonitorWindow(mainWindow)))
self.monitorAct.setToolTip('Monitor Current Data')
self.editActList = [self.undoAction, self.redoAction, self.clearundoAct, self.noUndoAct, self.reloadAct, self.monitorAct]
# the tool drop down menu
self.toolMenu = QtWidgets.QMenu("&Tools", self)
self.menubar.addMenu(self.toolMenu)
self.realAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'real.png'), "&Real", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.real()))
self.realAct.setToolTip('Take Real Part of Data')
self.imagAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'imag.png'), "&Imag", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.imag()))
self.imagAct.setToolTip('Take Imaginary Part of Data')
self.absAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'abs.png'), "&Abs", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.abs()))
self.absAct.setToolTip('Take Absolute of Data')
self.conjAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'complexconj.png'), "&Complex Conjugate", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.conj()))
self.conjAct.setToolTip('Take Complex Conjugate of Data')
self.apodizeAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'apodize.png'), "Apo&dize", lambda: self.mainWindowCheck(lambda mainWindow: ApodWindow(mainWindow)))
self.apodizeAct.setToolTip('Open Apodize Window')
self.phasingmenu = QtWidgets.QMenu('&Phasing', self)
self.toolMenu.addMenu(self.phasingmenu)
self.phaseAct = self.phasingmenu.addAction(QtGui.QIcon(IconDirectory + 'phase.png'), "&Phase", lambda: self.mainWindowCheck(lambda mainWindow: PhaseWindow(mainWindow)))
self.phaseAct.setToolTip('Open Phasing Window')
self.autoPhaseAct0 = self.phasingmenu.addAction(QtGui.QIcon(IconDirectory + 'autophase0.png'), "Autophase 0", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.directAutoPhase(0)))
self.autoPhaseAct0.setToolTip('Autophase 0 order')
self.autoPhaseAct1 = self.phasingmenu.addAction(QtGui.QIcon(IconDirectory + 'autophase1.png'), "Autophase 0+1", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.directAutoPhase(1)))
self.autoPhaseAct1.setToolTip('Autophase 0 and 1 order')
self.autoPhaseAllAct0 = self.phasingmenu.addAction(QtGui.QIcon(IconDirectory + 'autophase0.png'), "Autophase per trace 0", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.autoPhaseAll(0)))
self.autoPhaseAllAct0.setToolTip('Autophase per trace 0 order')
self.autoPhaseAllAct1 = self.phasingmenu.addAction(QtGui.QIcon(IconDirectory + 'autophase1.png'), "Autophase per trace 0+1", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.autoPhaseAll(1)))
self.autoPhaseAllAct1.setToolTip('Autophase per trace 0 and 1 order')
self.swapEchoAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'swapecho.png'), "Swap &Echo", lambda: self.mainWindowCheck(lambda mainWindow: SwapEchoWindow(mainWindow)))
self.swapEchoAct.setToolTip('Swap Echo')
self.corOffsetAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'offset.png'), "&Offset Correction", lambda: self.mainWindowCheck(lambda mainWindow: DCWindow(mainWindow)))
self.corOffsetAct.setToolTip('Offset Correction')
self.baselineAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'baseline.png'), "&Baseline Correction", lambda: self.mainWindowCheck(lambda mainWindow: BaselineWindow(mainWindow)))
self.baselineAct.setToolTip('Baseline Correction')
self.subAvgAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'subaverage.png'), "S&ubtract Averages", lambda: self.mainWindowCheck(lambda mainWindow: SubtractAvgWindow(mainWindow)))
self.subAvgAct.setToolTip('Subtract Averages')
self.refDeconvAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'deconvolute.png'), "Re&ference Deconvolution", lambda: self.mainWindowCheck(lambda mainWindow: FiddleWindow(mainWindow)))
self.refDeconvAct.setToolTip('Reference Deconvolution')
self.digitalFilterAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'dFilter.png'), "&Correct Digital Filter", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.CorrectDigitalFilter()))
self.digitalFilterAct.setToolTip("Correct Digital Filter")
self.lpsvdAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'LPSVD.png'), "&LPSVD", lambda: self.mainWindowCheck(lambda mainWindow: LPSVDWindow(mainWindow)))
self.lpsvdAct.setToolTip('LPSVD linear prediction')
self.scaleSWAct = self.toolMenu.addAction(QtGui.QIcon(IconDirectory + 'ScaleSW.png'), "Scale SW", lambda: self.mainWindowCheck(lambda mainWindow: ScaleSWWindow(mainWindow)))
self.scaleSWAct.setToolTip('Scale the Current Spectral Width')
self.referencelistmenu = QtWidgets.QMenu('&Reference', self)
self.toolMenu.addMenu(self.referencelistmenu)
self.setrefAct = self.referencelistmenu.addAction(QtGui.QIcon(IconDirectory + 'setreference.png'), "&Set Reference", lambda: self.mainWindowCheck(lambda mainWindow: RefWindow(mainWindow)))
self.setrefAct.setToolTip('Set Reference')
self.delrefAct = self.referencelistmenu.addAction(QtGui.QIcon(IconDirectory + 'delete.png'), "&Clear Current Reference", self.referenceClear)
self.delrefAct.setToolTip('Clear Current Reference')
self.referencerunmenu = QtWidgets.QMenu('&Apply', self)
self.referencelistmenu.addMenu(self.referencerunmenu)
self.referencedeletemenu = QtWidgets.QMenu('&Delete', self)
self.referencelistmenu.addMenu(self.referencedeletemenu)
self.referencerenamemenu = QtWidgets.QMenu('Re&name', self)
self.referencelistmenu.addMenu(self.referencerenamemenu)
self.referencesavemenu = QtWidgets.QMenu('&Save', self)
self.referencelistmenu.addMenu(self.referencesavemenu)
self.loadrefAct = self.referencelistmenu.addAction(QtGui.QIcon(IconDirectory + 'open.png'), "&Load", self.referenceLoad)
self.loadrefAct.setToolTip('Load Reference')
self.toolsActList = [self.realAct, self.imagAct, self.absAct, self.conjAct,
self.apodizeAct, self.phaseAct, self.autoPhaseAct0,
self.autoPhaseAct1, self.autoPhaseAllAct0, self.phasingmenu,
self.autoPhaseAllAct1, self.swapEchoAct, self.corOffsetAct,
self.baselineAct, self.subAvgAct, self.refDeconvAct, self.lpsvdAct,
self.digitalFilterAct, self.scaleSWAct]
# the matrix drop down menu
self.matrixMenu = QtWidgets.QMenu("M&atrix", self)
self.menubar.addMenu(self.matrixMenu)
self.sizingAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'sizing.png'), "&Sizing", lambda: self.mainWindowCheck(lambda mainWindow: SizeWindow(mainWindow)))
self.sizingAct.setToolTip('Set Size')
self.shiftAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'shift.png'), "S&hift Data", lambda: self.mainWindowCheck(lambda mainWindow: ShiftDataWindow(mainWindow)))
self.shiftAct.setToolTip('Shift Data')
self.rollAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'roll.png'), "Roll Data", lambda: self.mainWindowCheck(lambda mainWindow: RollDataWindow(mainWindow)))
self.rollAct.setToolTip('Roll Data')
self.alignAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'alignMax.png'), "Align Maxima", lambda: self.mainWindowCheck(lambda mainWindow: AlignDataWindow(mainWindow)))
self.alignAct.setToolTip('Align Maxima')
self.regionMenu = QtWidgets.QMenu("Region", self)
self.matrixMenu.addMenu(self.regionMenu)
self.intRegionAct = self.regionMenu.addAction(QtGui.QIcon(IconDirectory + 'int.png'), "&Integrate", lambda: self.mainWindowCheck(lambda mainWindow: integrateWindow(mainWindow)))
self.intRegionAct.setToolTip('Integrate Region')
self.sumRegionAct = self.regionMenu.addAction(QtGui.QIcon(IconDirectory + 'sum.png'), "S&um", lambda: self.mainWindowCheck(lambda mainWindow: sumWindow(mainWindow)))
self.sumRegionAct.setToolTip('Sum Region')
self.maxRegionAct = self.regionMenu.addAction(QtGui.QIcon(IconDirectory + 'max.png'), "&Max", lambda: self.mainWindowCheck(lambda mainWindow: maxWindow(mainWindow)))
self.maxRegionAct.setToolTip('Maximum of Region')
self.minRegionAct = self.regionMenu.addAction(QtGui.QIcon(IconDirectory + 'min.png'), "M&in", lambda: self.mainWindowCheck(lambda mainWindow: minWindow(mainWindow)))
self.minRegionAct.setToolTip('Minimum of Region')
self.maxposRegionAct = self.regionMenu.addAction(QtGui.QIcon(IconDirectory + 'maxpos.png'), "Ma&x position", lambda: self.mainWindowCheck(lambda mainWindow: argmaxWindow(mainWindow)))
self.maxposRegionAct.setToolTip('Position of Maximum of Region')
self.minposRegionAct = self.regionMenu.addAction(QtGui.QIcon(IconDirectory + 'minpos.png'), "Mi&n position", lambda: self.mainWindowCheck(lambda mainWindow: argminWindow(mainWindow)))
self.minposRegionAct.setToolTip('Position of Minimum of Region')
self.averageRegionAct = self.regionMenu.addAction(QtGui.QIcon(IconDirectory + 'average.png'), "&Average", lambda: self.mainWindowCheck(lambda mainWindow: avgWindow(mainWindow)))
self.averageRegionAct.setToolTip('Average of Region')
self.diffAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'diff.png'), "&Diff", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.diff()))
self.diffAct.setToolTip('Difference')
self.cumsumAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'cumsum.png'), "&Cumsum", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.cumsum()))
self.cumsumAct.setToolTip('Cumulative sum')
self.extractpartAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'extractpart.png'), "&Extract part", lambda: self.mainWindowCheck(lambda mainWindow: extractRegionWindow(mainWindow)))
self.extractpartAct.setToolTip('Extract part')
self.fliplrAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'fliplr.png'), "&Flip L/R", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.flipLR()))
self.fliplrAct.setToolTip('Flip L/R')
self.matrixdelAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'matrixdelete.png'), "De&lete", lambda: self.mainWindowCheck(lambda mainWindow: DeleteWindow(mainWindow)))
self.matrixdelAct.setToolTip('Delete Points')
self.splitAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'split.png'), "S&plit", lambda: self.mainWindowCheck(lambda mainWindow: SplitWindow(mainWindow)))
self.splitAct.setToolTip('Split')
self.multiplyAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'multiply.png'), "Mul&tiply", lambda: self.mainWindowCheck(lambda mainWindow: MultiplyWindow(mainWindow)))
self.multiplyAct.setToolTip('Multiply')
self.normalizeAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'normalize.png'), "Normalize", lambda: self.mainWindowCheck(lambda mainWindow: NormalizeWindow(mainWindow)))
self.normalizeAct.setToolTip('Normalize')
self.reorderAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'reorder.png'), "&Reorder", lambda: self.mainWindowCheck(lambda mainWindow: ReorderWindow(mainWindow)))
self.reorderAct.setToolTip('Reorder')
self.regridAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'regrid.png'), "Regrid", lambda: self.mainWindowCheck(lambda mainWindow: RegridWindow(mainWindow)))
self.regridAct.setToolTip('Regrid')
self.concatAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'concatenate.png'), "C&oncatenate", lambda: self.mainWindowCheck(lambda mainWindow: ConcatenateWindow(mainWindow)))
self.concatAct.setToolTip('Concatenate')
self.multiDActions.append(self.concatAct)
self.shearAct = self.matrixMenu.addAction(QtGui.QIcon(IconDirectory + 'shear.png'), "Shearin&g", lambda: self.mainWindowCheck(lambda mainWindow: ShearingWindow(mainWindow)))
self.shearAct.setToolTip('Shearing')
self.multiDActions.append(self.shearAct)
self.matrixActList = [self.sizingAct, self.shiftAct, self.rollAct, self.alignAct, self.intRegionAct,
self.sumRegionAct, self.maxRegionAct, self.minRegionAct,
self.maxposRegionAct, self.minposRegionAct, self.averageRegionAct,
self.diffAct, self.cumsumAct, self.extractpartAct,
self.fliplrAct, self.matrixdelAct, self.splitAct,
self.multiplyAct, self.normalizeAct, self.reorderAct, self.regridAct,
self.concatAct, self.shearAct]
# the Transforms drop down menu
self.transformsMenu = QtWidgets.QMenu("T&ransforms", self)
self.menubar.addMenu(self.transformsMenu)
self.fourierAct = self.transformsMenu.addAction(QtGui.QIcon(IconDirectory + 'fourier.png'), "&Fourier Transform", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.fourier()), QtCore.Qt.CTRL + QtCore.Qt.Key_F)
self.fourierAct.setToolTip('Fourier Transform')
self.realFourierAct = self.transformsMenu.addAction(QtGui.QIcon(IconDirectory + 'realfourier.png'), "&Real Fourier Transform", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.realFourier()))
self.realFourierAct.setToolTip('Real Fourier Transform')
self.fftshiftAct = self.transformsMenu.addAction(QtGui.QIcon(IconDirectory + 'fftshift.png'), "Fft&shift", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.fftshift()))
self.fftshiftAct.setToolTip('Fftshift')
self.invfftshiftAct = self.transformsMenu.addAction(QtGui.QIcon(IconDirectory + 'ifftshift.png'), "&Inv fftshift", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.invFftshift()))
self.invfftshiftAct.setToolTip('Inverse fftshift')
self.hilbertAct = self.transformsMenu.addAction(QtGui.QIcon(IconDirectory + 'hilbert.png'), "&Hilbert Transform", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.hilbert()))
self.hilbertAct.setToolTip('Hilbert Transform')
self.nusMenu = QtWidgets.QMenu("&NUS", self)
self.transformsMenu.addMenu(self.nusMenu)
self.ffmAct = self.nusMenu.addAction(QtGui.QIcon(IconDirectory + 'ffm.png'), "&FFM", lambda: self.mainWindowCheck(lambda mainWindow: FFMWindow(mainWindow)))
self.ffmAct.setToolTip('FFM')
self.cleanAct = self.nusMenu.addAction(QtGui.QIcon(IconDirectory + 'clean.png'), "&CLEAN", lambda: self.mainWindowCheck(lambda mainWindow: CLEANWindow(mainWindow)))
self.cleanAct.setToolTip('CLEAN')
self.istAct = self.nusMenu.addAction(QtGui.QIcon(IconDirectory + 'ist.png'), "&IST", lambda: self.mainWindowCheck(lambda mainWindow: ISTWindow(mainWindow)))
self.istAct.setToolTip('IST')
self.hypercomplexMenu = QtWidgets.QMenu("Hypercomplex", self)
self.transformsMenu.addMenu(self.hypercomplexMenu)
self.statesAct = self.hypercomplexMenu.addAction(QtGui.QIcon(IconDirectory + 'States.png'), "&States", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.states()))
self.statesAct.setToolTip('States Hypercomplex Data Processing')
self.statesTPPIAct = self.hypercomplexMenu.addAction(QtGui.QIcon(IconDirectory + 'statestppi.png'), "States-&TPPI", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.statesTPPI()))
self.statesTPPIAct.setToolTip('States-TPPI Hypercomplex Data Processing')
self.echoantiAct = self.hypercomplexMenu.addAction(QtGui.QIcon(IconDirectory + 'echoantiecho.png'), "Ec&ho-antiecho", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.echoAntiEcho()))
        self.echoantiAct.setToolTip('Echo-antiecho Hypercomplex Data Processing')
self.transformActList = [self.fourierAct, self.realFourierAct, self.fftshiftAct,
self.invfftshiftAct, self.hilbertAct, self.ffmAct,
self.cleanAct, self.istAct, self.statesAct, self.statesTPPIAct, self.echoantiAct]
# the fitting drop down menu
self.fittingMenu = QtWidgets.QMenu("F&itting", self)
self.menubar.addMenu(self.fittingMenu)
self.snrAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'snr.png'), "&S/N", lambda: self.mainWindowCheck(lambda mainWindow: SNWindow(mainWindow)))
self.snrAct.setToolTip('Signal-to-Noise Ratio')
self.fwhmAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'fwhm.png'), "&FWHM", lambda: self.mainWindowCheck(lambda mainWindow: FWHMWindow(mainWindow)))
self.fwhmAct.setToolTip('Full Width at Half Maximum')
self.massAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'mass.png'), "Centre of Mass", lambda: self.mainWindowCheck(lambda mainWindow: COMWindow(mainWindow)))
self.massAct.setToolTip('Centre of Mass')
self.intfitAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'int.png'), "&Integrals", lambda: self.mainWindowCheck(lambda mainWindow: IntegralsWindow(mainWindow)))
self.intfitAct.setToolTip('Get Integrals')
self.relaxAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'relaxation.png'), "&Relaxation Curve", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createRelaxWindow()))
self.relaxAct.setToolTip('Fit Relaxation Curve')
self.diffusionAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'diffusion.png'), "&Diffusion Curve", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createDiffusionWindow()))
self.diffusionAct.setToolTip('Fit Diffusion Curve')
self.lorentzfitAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'lorentz.png'), "&Lorentzian/Gaussian", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createPeakDeconvWindow()))
self.lorentzfitAct.setToolTip('Fit Lorentzian/Gaussian')
self.csastaticAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'csastatic.png'), "&CSA", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createCsaDeconvWindow()))
self.csastaticAct.setToolTip('Fit CSA')
self.quadAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'quadconversion.png'), "&Quadrupole", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createQuadDeconvWindow()))
self.quadAct.setToolTip('Fit Quadrupole')
self.quadCSAAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'quadcsa.png'), "Q&uadrupole+CSA", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createQuadCSADeconvWindow()))
self.quadCSAAct.setToolTip('Fit Quadrupole+CSA')
self.czjzekAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'czjzekstatic.png'), "C&zjzek", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createQuadCzjzekWindow()))
self.czjzekAct.setToolTip('Fit Czjzek Pattern')
self.mqmasAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'mqmas.png'), "&MQMAS", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createMQMASWindow()))
self.mqmasAct.setToolTip('Fit MQMAS')
self.mqmasCzjzekAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'mqmas.png'), "Cz&jzek MQMAS", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createMQMASCzjzekWindow()))
self.mqmasCzjzekAct.setToolTip('Fit Czjzek MQMAS')
self.externalFitAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'simpson.png'), "&External", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createExternalFitWindow()))
self.externalFitAct.setToolTip('Fit External')
self.functionFitAct = self.fittingMenu.addAction(QtGui.QIcon(IconDirectory + 'function.png'), "F&unction fit", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.createFunctionFitWindow()))
self.functionFitAct.setToolTip('Fit Function')
self.fittingActList = [self.snrAct, self.fwhmAct, self.massAct,
self.intfitAct, self.relaxAct, self.diffusionAct,
self.lorentzfitAct, self.csastaticAct, self.quadAct, self.quadCSAAct,
self.czjzekAct, self.externalFitAct, self.functionFitAct]
# the combine drop down menu
self.combineMenu = QtWidgets.QMenu("Com&bine", self)
self.menubar.addMenu(self.combineMenu)
self.combineWorkspaceAct = self.combineMenu.addAction(QtGui.QIcon(IconDirectory + 'combine.png'), '&Combine Workspaces', self.createCombineWorkspaceWindow)
self.combineWorkspaceAct.setToolTip('Combine Workspaces')
self.insertdatAct = self.combineMenu.addAction(QtGui.QIcon(IconDirectory + 'insert.png'), "&Insert From Workspace", lambda: self.mainWindowCheck(lambda mainWindow: InsertWindow(mainWindow)))
self.insertdatAct.setToolTip('Insert From Workspace')
self.adddatAct = self.combineMenu.addAction(QtGui.QIcon(IconDirectory + 'add.png'), "&Add", lambda: self.mainWindowCheck(lambda mainWindow: CombineWindow(mainWindow, 0)))
self.adddatAct.setToolTip('Add Data From Workspace')
self.subdatAct = self.combineMenu.addAction(QtGui.QIcon(IconDirectory + 'subtract.png'), "&Subtract", lambda: self.mainWindowCheck(lambda mainWindow: CombineWindow(mainWindow, 1)))
self.subdatAct.setToolTip('Subtract Data From Workspace')
self.multdatAct = self.combineMenu.addAction(QtGui.QIcon(IconDirectory + 'multiplyWorkspace.png'), "&Multiply", lambda: self.mainWindowCheck(lambda mainWindow: CombineWindow(mainWindow, 2)))
self.multdatAct.setToolTip('Multiply Data From Workspace')
self.divdatAct = self.combineMenu.addAction(QtGui.QIcon(IconDirectory + 'divideWorkspace.png'), "&Divide", lambda: self.mainWindowCheck(lambda mainWindow: CombineWindow(mainWindow, 3)))
self.divdatAct.setToolTip('Divide Data From Workspace')
self.combineActList = [self.combineWorkspaceAct, self.insertdatAct, self.adddatAct,
self.subdatAct, self.multdatAct, self.divdatAct]
# the plot drop down menu
self.plotMenu = QtWidgets.QMenu("&Plot", self)
self.menubar.addMenu(self.plotMenu)
self.onedplotAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + '1dplot.png'), "&1D Plot", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.plot1D()))
self.onedplotAct.setToolTip('1D plot')
self.scatterplotAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + 'scatterplot.png'), "&Scatter Plot", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.plotScatter()))
self.scatterplotAct.setToolTip('Scatter Plot')
self.stackplotAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + 'stack.png'), "S&tack Plot", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.plotStack()))
self.stackplotAct.setToolTip('Stack Plot')
self.multiDActions.append(self.stackplotAct)
self.arrayplotAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + 'array.png'), "&Array Plot", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.plotArray()))
self.arrayplotAct.setToolTip('Array Plot')
self.multiDActions.append(self.arrayplotAct)
self.contourplotAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + 'contour.png'), "&Contour Plot", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.plotContour()))
self.contourplotAct.setToolTip('Contour Plot')
self.multiDActions.append(self.contourplotAct)
        self.multiContourplotAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + 'multicontour.png'), "Mu&lti Contour Plot", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.plotMultiContour()))
self.multiContourplotAct.setToolTip('Multi Contour Plot')
self.multiDActions.append(self.multiContourplotAct)
self.colour2DplotAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + '2DColour.png'), "2D Colour Plot", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.plotColour2D()))
self.colour2DplotAct.setToolTip('2D Colour Plot')
self.multiDActions.append(self.colour2DplotAct)
self.multiplotAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + 'multi.png'), "&Multi Plot", lambda: self.mainWindowCheck(lambda mainWindow: mainWindow.plotMulti()))
self.multiplotAct.setToolTip('Multi Plot')
#==========
self.userxAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + 'xaxis.png'), "&User X-axis", lambda: self.mainWindowCheck(lambda mainWindow: XaxWindow(mainWindow)))
self.userxAct.setToolTip('User X-axis')
self.plotprefAct = self.plotMenu.addAction(QtGui.QIcon(IconDirectory + 'preferences.png'), "&Plot Settings", lambda: self.mainWindowCheck(lambda mainWindow: PlotSettingsWindow(mainWindow)))
self.plotprefAct.setToolTip('Plot Settings')
self.plotActList = [self.onedplotAct, self.scatterplotAct, self.stackplotAct,
self.arrayplotAct, self.contourplotAct, self.colour2DplotAct, self.multiplotAct,
self.multiContourplotAct, self.setrefAct, self.delrefAct, self.userxAct, self.plotprefAct]
# the history drop down menu
self.historyMenu = QtWidgets.QMenu("&History", self)
self.menubar.addMenu(self.historyMenu)
self.historyAct = self.historyMenu.addAction(QtGui.QIcon(IconDirectory + 'history.png'), "&History", lambda: self.mainWindowCheck(lambda mainWindow: HistoryWindow(mainWindow)))
self.historyAct.setToolTip('Show Processing History')
self.errorAct = self.historyMenu.addAction(QtGui.QIcon(IconDirectory + 'error.png'), "&Error Messages", lambda: errorWindow(self))
self.errorAct.setToolTip('Show Error Messages')
self.historyActList = [self.historyAct]
# Utilities dropdown menu
self.utilitiesMenu = QtWidgets.QMenu("&Utilities", self)
self.menubar.addMenu(self.utilitiesMenu)
self.shiftconvAct = self.utilitiesMenu.addAction(QtGui.QIcon(IconDirectory + 'shifttool.png'), "&Chemical Shift Conversion Tool", self.createShiftConversionWindow)
self.shiftconvAct.setToolTip('Chemical Shift Conversion Tool')
self.dipolarconvAct = self.utilitiesMenu.addAction(QtGui.QIcon(IconDirectory + 'dipolar.png'), "Dipolar Distance Tool", self.createDipolarDistanceWindow)
self.dipolarconvAct.setToolTip('Dipolar Distance Tool')
self.quadconvAct = self.utilitiesMenu.addAction(QtGui.QIcon(IconDirectory + 'quadconversion.png'), "&Quadrupole Coupling Conversion Tool", self.createQuadConversionWindow)
self.quadconvAct.setToolTip('Quadrupole Coupling Conversion Tool')
self.mqmasconvAct = self.utilitiesMenu.addAction(QtGui.QIcon(IconDirectory + 'mqmas.png'), "MQMAS Parameter Extraction Tool", self.createMqmasExtractWindow)
self.mqmasconvAct.setToolTip('MQMAS Parameter Extraction Tool')
self.tempcalAct = self.utilitiesMenu.addAction(QtGui.QIcon(IconDirectory + 'temperature.png'), "Temperature Calibration Tool", self.createTempcalWindow)
        self.tempcalAct.setToolTip('Temperature Calibration Tool')
self.nmrtableAct = self.utilitiesMenu.addAction(QtGui.QIcon(IconDirectory + 'table.png'), "&NMR Table", self.nmrTable)
self.nmrtableAct.setToolTip('NMR Periodic Table')
self.utilitiesActList = [self.shiftconvAct, self.quadconvAct, self.nmrtableAct, self.dipolarconvAct, self.mqmasconvAct, self.tempcalAct]
# the help drop down menu
self.helpMenu = QtWidgets.QMenu("&Help", self)
self.menubar.addMenu(self.helpMenu)
if not EXE:
self.updateAct = self.helpMenu.addAction(QtGui.QIcon(IconDirectory + 'update.png'), "&Update", self.updateMenu)
self.updateAct.setToolTip('Update ssNake')
self.helpActList = [self.updateAct]
else:
self.helpActList = []
self.refmanAct = self.helpMenu.addAction(QtGui.QIcon(IconDirectory + 'manual.png'), "Reference Manual", openRefMan)
self.refmanAct.setToolTip('Open the Reference Manual')
self.basTutorialAct = self.helpMenu.addAction(QtGui.QIcon(IconDirectory + 'Tutorial.png'), "Basic Tutorial", openTutorial)
self.basTutorialAct.setToolTip('Open the Tutorial Folder')
self.tutorialAct = self.helpMenu.addAction(QtGui.QIcon(IconDirectory + 'Tutorial.png'), "Advanced Tutorials", lambda: webbrowser.open('https://github.com/smeerten/ssnake_tutorials/'))
self.tutorialAct.setToolTip('Link to ssNake Advanced Processing Tutorials')
self.githubAct = self.helpMenu.addAction(QtGui.QIcon(IconDirectory + 'GitHub.png'), "GitHub Page", lambda: webbrowser.open('https://github.com/smeerten/ssnake/'))
self.githubAct.setToolTip('ssNake GitHub Page')
self.aboutAct = self.helpMenu.addAction(QtGui.QIcon(IconDirectory + 'about.png'), "&About", lambda: aboutWindow(self))
self.aboutAct.setToolTip('About Menu')
self.helpActList = self.helpActList + [self.shiftconvAct, self.quadconvAct, self.nmrtableAct, self.githubAct,
self.tutorialAct, self.aboutAct, self.basTutorialAct]
# Extra event lists:
self.specOnlyList = [self.regridAct, self.csastaticAct, self.quadAct, self.quadCSAAct, self.czjzekAct]
self.fidOnlyList = [self.relaxAct, self.diffusionAct, self.swapEchoAct]
self.Only1DPlot = [self.snrAct, self.fwhmAct, self.massAct, self.intfitAct]
self.notInArrayPlot = [self.userxAct, self.setrefAct, self.swapEchoAct, self.corOffsetAct, self.baselineAct, self.subAvgAct,
                               self.refDeconvAct, self.intRegionAct, self.sumRegionAct, self.maxRegionAct,
self.minRegionAct, self.maxposRegionAct, self.minposRegionAct, self.averageRegionAct,
self.extractpartAct, self.matrixdelAct, self.normalizeAct, self.regridAct]
def mainWindowCheck(self, transfer):
        # Check whether a workspace (mainWindow) exists before executing the given function on it
if self.mainWindow is not None:
transfer(self.mainWindow)
else:
raise SsnakeException("No workspaces open")
def dragEnterEvent(self, event):
        if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
fileList = [url.toLocalFile() for url in event.mimeData().urls()]
self.loadData(fileList)
def menuCheck(self):
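        # Enable or disable menus and actions depending on whether a workspace is open
        # and on its type (1D window, save-figure view, or fitting window)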
if self.mainWindow is None:
self.savemenu.menuAction().setEnabled(False)
self.exportmenu.menuAction().setEnabled(False)
self.workspacemenu.menuAction().setEnabled(False)
self.macrolistmenu.menuAction().setEnabled(False)
self.editmenu.menuAction().setEnabled(False)
self.toolMenu.menuAction().setEnabled(True)
self.matrixMenu.menuAction().setEnabled(False)
self.transformsMenu.menuAction().setEnabled(False)
self.fittingMenu.menuAction().setEnabled(False)
self.combineMenu.menuAction().setEnabled(False)
self.referencerunmenu.menuAction().setEnabled(False)
for act in self.saveActList + self.exportActList + self.workspaceActList + self.macroActList + self.editActList + self.toolsActList + self.matrixActList + self.transformActList + self.fittingActList + self.plotActList + self.combineActList + self.historyActList:
act.setEnabled(False)
else:
self.editmenu.menuAction().setEnabled(True)
self.toolMenu.menuAction().setEnabled(True)
self.matrixMenu.menuAction().setEnabled(True)
self.transformsMenu.menuAction().setEnabled(True)
self.fittingMenu.menuAction().setEnabled(True)
self.combineMenu.menuAction().setEnabled(True)
self.referencerunmenu.menuAction().setEnabled(True)
for act in self.editActList + self.toolsActList + self.matrixActList + self.transformActList + self.fittingActList + self.plotActList + self.historyActList + self.combineActList:
act.setEnabled(True)
if type(self.mainWindow) is Main1DWindow:
self.menuEnable(True)
for act in self.specOnlyList:
act.setEnabled(self.mainWindow.current.spec() == 1) # Only on for spec
for act in self.fidOnlyList:
act.setEnabled(self.mainWindow.current.spec() == 0) # Only on for FID
                # Limit functions based on the plot type
                if type(self.mainWindow.current) in (views.CurrentMulti, views.CurrentStacked, views.CurrentArrayed):
for act in self.Only1DPlot:
act.setEnabled(False)
if type(self.mainWindow.current) == views.CurrentArrayed:
for act in self.notInArrayPlot:
act.setEnabled(False)
if self.mainWindow.masterData.noUndo: # Set menu check to the same value as in the data
self.noUndoAct.setChecked(True)
else:
self.noUndoAct.setChecked(False)
if len(self.mainWindow.masterData.shape()) < 2:
for i in self.multiDActions:
i.setEnabled(False)
else:
for i in self.multiDActions:
i.setEnabled(True)
if not self.mainWindow.masterData.undoList:
self.undoAction.setEnabled(False)
else:
self.undoAction.setEnabled(True)
if not self.mainWindow.masterData.redoList:
self.redoAction.setEnabled(False)
else:
self.redoAction.setEnabled(True)
if not self.mainWindow.masterData.undoList and not self.mainWindow.masterData.redoList:
self.clearundoAct.setEnabled(False)
self.savemenu.menuAction().setEnabled(True)
self.exportmenu.menuAction().setEnabled(True)
self.savefigAct.setEnabled(True)
self.macrolistmenu.menuAction().setEnabled(True)
if self.mainWindow.currentMacro is None:
self.macrostopAct.setEnabled(False)
self.macrostartAct.setEnabled(True)
else:
self.macrostopAct.setEnabled(True)
self.macrostartAct.setEnabled(False)
self.savemenu.menuAction().setEnabled(True)
self.exportmenu.menuAction().setEnabled(True)
self.workspacemenu.menuAction().setEnabled(True)
elif type(self.mainWindow) is SaveFigureWindow:
self.menuEnable(False, True)
self.savemenu.menuAction().setEnabled(False)
self.exportmenu.menuAction().setEnabled(False)
for act in self.workspaceActList:
act.setEnabled(True)
for act in self.saveActList + self.exportActList:
act.setEnabled(False)
self.workspacemenu.menuAction().setEnabled(True)
self.macrolistmenu.menuAction().setEnabled(False)
self.workInfoAct.setEnabled(False)
        else:  # Fitting window
self.menuEnable(False, True)
self.savemenu.menuAction().setEnabled(False)
self.exportmenu.menuAction().setEnabled(True)
self.workspacemenu.menuAction().setEnabled(True)
self.macrolistmenu.menuAction().setEnabled(False)
for act in self.saveActList + self.exportActList + self.workspaceActList:
act.setEnabled(False)
for act in self.workspaceActList:
act.setEnabled(True)
self.savefigAct.setEnabled(True)
self.workInfoAct.setEnabled(False)
def menuEnable(self, enable=True, internalWindow=False):
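        # Toggle the processing menus and actions as a group; with internalWindow=True the
        # file/workspace menus, the workspace tree and the tabs are left untouched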
self.menuActive = enable or internalWindow
self.macrolistmenu.menuAction().setEnabled(enable)
self.editmenu.menuAction().setEnabled(enable)
self.matrixMenu.menuAction().setEnabled(enable)
self.transformsMenu.menuAction().setEnabled(enable)
self.fittingMenu.menuAction().setEnabled(enable)
self.combineMenu.menuAction().setEnabled(enable)
self.referencerunmenu.menuAction().setEnabled(enable)
# Actions:
for act in self.macroActList + self.editActList + self.toolsActList + self.matrixActList + self.transformActList + self.fittingActList + self.plotActList + self.combineActList + self.historyActList:
act.setEnabled(enable)
if not internalWindow:
self.tree.setEnabled(enable)
self.filemenu.menuAction().setEnabled(enable)
self.workspacemenu.menuAction().setEnabled(enable)
self.toolMenu.menuAction().setEnabled(enable)
for act in self.fileActList + self.workspaceActList:
act.setEnabled(enable)
for i in range(self.tabs.count()):
if i != self.workspaceNum:
self.tabs.setTabEnabled(i, enable)
self.undoAction.setEnabled(enable)
self.redoAction.setEnabled(enable)
def askName(self, filePath=None, name=None):
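        # Prompt for a workspace name, suggesting a default ('spectrum0', 'spectrum1', ...)
        # that does not collide with existing workspace names; returns None when cancelled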
if filePath is None:
message = 'Spectrum name'
else:
message = 'Spectrum name for: ' + filePath
count = 0
if name is None:
name = 'spectrum' + str(count)
while name in self.workspaceNames:
count += 1
name = 'spectrum' + str(count)
givenName, ok = QtWidgets.QInputDialog.getText(self, message, 'Name:', text=name)
if not ok:
return None
while (givenName in self.workspaceNames) or givenName == '':
self.dispMsg("Workspace name '" + givenName + "' already exists")
givenName, ok = QtWidgets.QInputDialog.getText(self, message, 'Name:', text=name)
if not ok:
return None
return givenName
def undo(self, *args):
if isinstance(self.mainWindow, Main1DWindow):
self.mainWindow.undo()
def redo(self, *args):
if self.mainWindow is not None:
self.mainWindow.redo()
def macroCreate(self):
if self.mainWindow is None:
return
if self.mainWindow.currentMacro is not None:
return
count = 0
name = 'macro' + str(count)
while name in self.macros.keys():
count += 1
name = 'macro' + str(count)
givenName, ok = QtWidgets.QInputDialog.getText(self, 'Macro name', 'Name:', text=name)
if not ok:
return
while (givenName in self.macros.keys()) or (givenName == ''):
self.dispMsg("Macro name '" + givenName + "' already exists")
givenName, ok = QtWidgets.QInputDialog.getText(self, 'Macro name', 'Name:', text=name)
if not ok:
return
self.macros[givenName] = []
self.mainWindow.redoMacro = []
self.mainWindow.currentMacro = givenName
IconDirectory = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'Icons' + os.path.sep
action1 = self.macrolistmenu.addAction(QtGui.QIcon(IconDirectory + 'run.png'), givenName, lambda name=givenName: self.runMacro(name))
action2 = self.macrosavemenu.addAction(QtGui.QIcon(IconDirectory + 'save.png'), givenName, lambda name=givenName: self.saveMacro(name))
action3 = self.macrodeletemenu.addAction(QtGui.QIcon(IconDirectory + 'delete.png'), givenName, lambda name=givenName: self.deleteMacro(name))
action4 = self.macrorenamemenu.addAction(QtGui.QIcon(IconDirectory + 'rename.png'), givenName, lambda name=givenName: self.renameMacro(name))
self.macroActions[givenName] = [action1, action2, action3, action4]
self.menuCheck()
def renameMacro(self, oldName):
if self.mainWindow is None:
return
count = 0
name = 'macro' + str(count)
while name in self.macros.keys():
count += 1
name = 'macro' + str(count)
givenName, ok = QtWidgets.QInputDialog.getText(self, 'Macro name', 'Name:', text=name)
while (givenName in self.macros.keys()) or givenName == '':
if not ok:
return
self.dispMsg("Macro name '" + givenName + "' already exists")
givenName, ok = QtWidgets.QInputDialog.getText(self, 'Macro name', 'Name:', text=name)
self.macros[givenName] = self.macros.pop(oldName)
if self.mainWindow.currentMacro == oldName:
self.mainWindow.currentMacro = givenName
oldActions = self.macroActions.pop(oldName)
self.macrolistmenu.removeAction(oldActions[0])
self.macrosavemenu.removeAction(oldActions[1])
self.macrodeletemenu.removeAction(oldActions[2])
self.macrorenamemenu.removeAction(oldActions[3])
IconDirectory = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'Icons' + os.path.sep
action1 = self.macrolistmenu.addAction(QtGui.QIcon(IconDirectory + 'run.png'), givenName, lambda name=givenName: self.runMacro(name))
action2 = self.macrosavemenu.addAction(QtGui.QIcon(IconDirectory + 'save.png'), givenName, lambda name=givenName: self.saveMacro(name))
action3 = self.macrodeletemenu.addAction(QtGui.QIcon(IconDirectory + 'delete.png'), givenName, lambda name=givenName: self.deleteMacro(name))
action4 = self.macrorenamemenu.addAction(QtGui.QIcon(IconDirectory + 'rename.png'), givenName, lambda name=givenName: self.renameMacro(name))
self.macroActions[givenName] = [action1, action2, action3, action4]
self.menuCheck()
def stopMacro(self):
if self.mainWindow is None:
return
if self.mainWindow.currentMacro is None:
return
self.mainWindow.redoMacro = []
self.mainWindow.currentMacro = None
self.menuCheck()
def macroAdd(self, name, macros):
self.macros[name].append(macros)
def runMacro(self, name):
if self.mainWindow is not None:
self.mainWindow.runMacro(self.macros[name])
def saveMacro(self, name):
fileName = QtWidgets.QFileDialog.getSaveFileName(self, 'Save file', self.lastLocation + os.path.sep + name + '.macro', 'MACRO (*.macro)')
if isinstance(fileName, tuple):
fileName = fileName[0]
if not fileName:
return
self.lastLocation = os.path.dirname(fileName)
outputMacro = self.macros[name]
with open(fileName, 'w') as f:
for line in outputMacro:
f.write(line[0])
f.write(repr(line[1]).replace('\n', '').replace(' ', ''))
f.write('\n')
def deleteMacro(self, name):
self.macrolistmenu.removeAction(self.macroActions[name][0])
self.macrosavemenu.removeAction(self.macroActions[name][1])
self.macrodeletemenu.removeAction(self.macroActions[name][2])
self.macrorenamemenu.removeAction(self.macroActions[name][3])
del self.macros[name]
del self.macroActions[name]
for i in self.workspaces:
if i.currentMacro == name:
i.currentMacro = None
self.menuCheck()
def loadMacro(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', self.lastLocation)
if isinstance(filename, tuple):
filename = filename[0]
if filename: # if not cancelled
self.lastLocation = os.path.dirname(filename) # Save used path
if not filename:
return
self.stopMacro()
count = 0
name = 'macro' + str(count)
while name in self.macros.keys():
count += 1
name = 'macro' + str(count)
givenName, ok = QtWidgets.QInputDialog.getText(self, 'Macro name', 'Name:', text=name)
while (givenName in self.macros.keys()) or (givenName == ''):
if not ok:
return
self.dispMsg("Macro name '" + givenName + "' already exists")
givenName, ok = QtWidgets.QInputDialog.getText(self, 'Macro name', 'Name:', text=name)
with open(filename, 'r') as f:
stringList = f.readlines()
self.macros[givenName] = []
for line in stringList:
splitLine = line.split("(", 1)
splitLine[0] = splitLine[0].replace(' ', '')
splitLine[1] = safeEval("(" + splitLine[1])
self.macros[givenName].append(splitLine)
IconDirectory = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'Icons' + os.path.sep
action1 = self.macrolistmenu.addAction(QtGui.QIcon(IconDirectory + 'run.png'), givenName, lambda name=givenName: self.runMacro(name))
action2 = self.macrosavemenu.addAction(QtGui.QIcon(IconDirectory + 'save.png'), givenName, lambda name=givenName: self.saveMacro(name))
action3 = self.macrodeletemenu.addAction(QtGui.QIcon(IconDirectory + 'delete.png'), givenName, lambda name=givenName: self.deleteMacro(name))
action4 = self.macrorenamemenu.addAction(QtGui.QIcon(IconDirectory + 'rename.png'), givenName, lambda name=givenName: self.renameMacro(name))
self.macroActions[givenName] = [action1, action2, action3, action4]
self.menuCheck()
def referenceAdd(self, reffreq, name):
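        # Register a reference frequency under the given name and add matching
        # run/delete/rename/save entries to the reference menus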
self.referenceName.append(name)
        self.referenceValue.append(reffreq)  # List with saved reference values
IconDirectory = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'Icons' + os.path.sep
action1 = self.referencerunmenu.addAction(QtGui.QIcon(IconDirectory + 'run.png'), name, lambda name=name: self.referenceRun(name))
action2 = self.referencedeletemenu.addAction(QtGui.QIcon(IconDirectory + 'delete.png'), name, lambda name=name: self.referenceRemove(name))
action3 = self.referencerenamemenu.addAction(QtGui.QIcon(IconDirectory + 'rename.png'), name, lambda name=name: self.referenceRename(name))
action4 = self.referencesavemenu.addAction(QtGui.QIcon(IconDirectory + 'save.png'), name, lambda name=name: self.referenceSave(name))
self.referenceActions[name] = [action1, action2, action3, action4]
self.menuCheck()
def referenceClear(self):
self.mainWindow.current.setRef(None)
def referenceRun(self, name):
reffreq = self.referenceValue[self.referenceName.index(name)]
self.mainWindow.current.setRef(reffreq)
def referenceRemove(self, name):
        del self.referenceValue[self.referenceName.index(name)]
self.referenceName.remove(name)
self.referencerunmenu.removeAction(self.referenceActions[name][0])
self.referencedeletemenu.removeAction(self.referenceActions[name][1])
self.referencerenamemenu.removeAction(self.referenceActions[name][2])
self.referencesavemenu.removeAction(self.referenceActions[name][3])
del self.referenceActions[name]
self.menuCheck()
def referenceRename(self, oldName):
if self.mainWindow is None:
return
givenName, ok = QtWidgets.QInputDialog.getText(self, 'Reference name', 'Name:', text=oldName)
if givenName == oldName or not ok:
return
while (givenName in self.referenceName) or (givenName == ''):
self.dispMsg('Name exists')
givenName, ok = QtWidgets.QInputDialog.getText(self, 'Reference name', 'Name:', text=oldName)
if not ok:
return
self.referenceName[self.referenceName.index(oldName)] = givenName
oldActions = self.referenceActions.pop(oldName)
self.referencerunmenu.removeAction(oldActions[0])
self.referencedeletemenu.removeAction(oldActions[1])
self.referencerenamemenu.removeAction(oldActions[2])
self.referencesavemenu.removeAction(oldActions[3])
IconDirectory = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'Icons' + os.path.sep
action1 = self.referencerunmenu.addAction(QtGui.QIcon(IconDirectory + 'run.png'), givenName, lambda name=givenName: self.referenceRun(name))
action2 = self.referencedeletemenu.addAction(QtGui.QIcon(IconDirectory + 'delete.png'), givenName, lambda name=givenName: self.referenceRemove(name))
action3 = self.referencerenamemenu.addAction(QtGui.QIcon(IconDirectory + 'rename.png'), givenName, lambda name=givenName: self.referenceRename(name))
action4 = self.referencesavemenu.addAction(QtGui.QIcon(IconDirectory + 'save.png'), givenName, lambda name=givenName: self.referenceSave(name))
self.referenceActions[givenName] = [action1, action2, action3, action4]
self.menuCheck()
def referenceSave(self, name):
fileName = QtWidgets.QFileDialog.getSaveFileName(self, 'Save reference', self.lastLocation + os.path.sep + name + '.txt', 'txt (*.txt)')
if isinstance(fileName, tuple):
fileName = fileName[0]
if not fileName:
return
self.lastLocation = os.path.dirname(fileName)
reffreq = self.referenceValue[self.referenceName.index(name)]
with open(fileName, 'w') as f:
f.write(str(reffreq))
def referenceLoad(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', self.lastLocation)
if isinstance(filename, tuple):
filename = filename[0]
if filename: # if not cancelled
self.lastLocation = os.path.dirname(filename) # Save used path
if not filename:
return
name = os.path.basename(filename)
        if name.endswith('.txt'):  # If it has the regular extension, suggest the file name without it
name = name[:-4]
count = 0
        while name in self.referenceName:  # If the name is already known, cycle through default names
name = 'ref' + str(count)
count += 1
givenName, ok = QtWidgets.QInputDialog.getText(self, 'Reference name', 'Name:', text=name)
        while (givenName in self.referenceName) or (givenName == ''):
if not ok:
return
self.dispMsg('Name exists')
            givenName, ok = QtWidgets.QInputDialog.getText(self, 'Reference name', 'Name:', text=name)
with open(filename, 'r') as f:
self.referenceName.append(givenName)
try:
freq = float(f.read())
except Exception:
raise SsnakeException("Failed loading '" + filename + "' as reference.")
IconDirectory = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'Icons' + os.path.sep
self.referenceValue.append(freq)
action1 = self.referencerunmenu.addAction(QtGui.QIcon(IconDirectory + 'run.png'), givenName, lambda name=givenName: self.referenceRun(name))
action2 = self.referencedeletemenu.addAction(QtGui.QIcon(IconDirectory + 'delete.png'), givenName, lambda name=givenName: self.referenceRemove(name))
action3 = self.referencerenamemenu.addAction(QtGui.QIcon(IconDirectory + 'rename.png'), givenName, lambda name=givenName: self.referenceRename(name))
action4 = self.referencesavemenu.addAction(QtGui.QIcon(IconDirectory + 'save.png'), givenName, lambda name=givenName: self.referenceSave(name))
self.referenceActions[givenName] = [action1, action2, action3, action4]
self.menuCheck()
def noUndoMode(self, val):
self.mainWindow.current.setNoUndo(val)
self.menuCheck()
def changeMainWindow(self, var):
if not self.allowChange:
return
self.logo.hide()
self.tabs.show()
if isinstance(var, int):
num = var
else:
num = self.workspaceNames.index(var)
self.workspaceNum = num
self.mainWindow = self.workspaces[num]
self.tabs.setCurrentIndex(num)
self.updWorkspaceMenu()
self.menuCheck()
try:
if type(self.mainWindow.current) is views.CurrentMulti:
self.mainWindow.sideframe.checkChanged()
except Exception:
pass
def moveWorkspace(self, end, start):
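        # Reorder the workspace lists when a tab is dragged from position 'start' to 'end',
        # keeping workspaceNum pointing at the same workspace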
self.workspaces.insert(end, self.workspaces.pop(start))
self.workspaceNames.insert(end, self.workspaceNames.pop(start))
if self.workspaceNum == start:
self.workspaceNum = end
elif self.workspaceNum > end and self.workspaceNum < start:
self.workspaceNum += 1
elif self.workspaceNum < end and self.workspaceNum > start:
self.workspaceNum -= 1
def stepWorkspace(self, step):
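        # Switch the active workspace by 'step' positions, wrapping around the tab list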
if len(self.workspaces) > 1:
self.workspaceNum += step
self.workspaceNum = self.workspaceNum % len(self.workspaces)
self.mainWindow = self.workspaces[self.workspaceNum]
self.tabs.setCurrentIndex(self.workspaceNum)
self.updWorkspaceMenu()
self.menuCheck()
if type(self.mainWindow) is not SaveFigureWindow:
if type(self.mainWindow.current) is views.CurrentMulti:
self.mainWindow.sideframe.checkChanged()
def duplicateWorkspace(self, sliceOnly=False, *args):
name = self.askName()
if sliceOnly:
data = copy.deepcopy(self.mainWindow.get_current().data1D)
data.setNoUndo(self.mainWindow.get_masterData().noUndo)
else:
data = copy.deepcopy(self.mainWindow.get_masterData())
if name is None:
return
self.workspaces.append(Main1DWindow(self, data, self.mainWindow.get_current()))
self.workspaces[-1].rename(name)
self.tabs.addTab(self.workspaces[-1], name)
self.workspaceNames.append(name)
self.changeMainWindow(name)
def renameWorkspace(self, *args):
tmp = self.workspaceNames[self.workspaceNum]
self.workspaceNames[self.workspaceNum] = ''
name = self.askName(tmp, tmp)
if name is None:
self.workspaceNames[self.workspaceNum] = tmp
return
self.workspaceNames[self.workspaceNum] = name
self.tabs.setTabText(self.workspaceNum, name)
self.updWorkspaceMenu()
self.workspaces[self.workspaceNum].rename(name)
def destroyWorkspace(self, num=None):
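        # Close and clean up a workspace tab; when the active one is closed,
        # activate another workspace or fall back to the logo screen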
if self.mainWindow is None or self.menuActive is False:
return
if num is None:
num = self.workspaceNum
self.allowChange = False
self.tabs.removeTab(num)
self.allowChange = True
if num == self.workspaceNum:
self.mainWindow.kill()
self.mainWindow = None
else:
self.workspaces[num].kill()
del self.workspaceNames[num]
del self.workspaces[num]
if num == self.workspaceNum:
if num == len(self.workspaces):
self.workspaceNum = num - 1
if num < self.workspaceNum:
self.workspaceNum -= 1
if self.workspaces:
self.changeMainWindow(self.workspaceNames[self.workspaceNum])
else:
self.logo.show()
self.tabs.hide()
self.updWorkspaceMenu()
def updWorkspaceMenu(self):
self.activemenu.clear()
for i in self.workspaceNames:
self.activemenu.addAction(i, lambda i=i: self.changeMainWindow(i))
self.menuCheck()
def newWorkspace(self, masterData):
name = self.askName()
if name is None:
raise SsnakeException("No name given")
self.workspaces.append(Main1DWindow(self, masterData))
self.workspaces[-1].rename(name)
self.tabs.addTab(self.workspaces[-1], name)
self.workspaceNames.append(name)
self.changeMainWindow(name)
def createCombineWorkspaceWindow(self):
CombineWorkspaceWindow(self)
def createCombineLoadWindow(self):
CombineLoadWindow(self)
def combineWorkspace(self, combineNames):
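        # Stack the data of the selected workspaces into a new workspace;
        # all of them must share the same shape and hypercomplex structure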
wsname = self.askName()
if wsname is None:
return
i = self.workspaceNames.index(combineNames[0])
combineMasterData = copy.deepcopy(self.workspaces[i].get_masterData())
shapeRequired = combineMasterData.shape()
hyperShape = len(combineMasterData.data)
combineMasterData.split(1, -1)
for name in combineNames[1:]:
i = self.workspaceNames.index(name)
addData = self.workspaces[i].get_masterData()
if addData.shape() != shapeRequired:
raise SsnakeException("Not all the data has the same shape")
if len(addData.data) != hyperShape:
raise SsnakeException("Not all the data has the same hypercomplex shape")
combineMasterData.insert(addData.data, combineMasterData.shape()[0], 0)
self.workspaces.append(Main1DWindow(self, combineMasterData))
self.workspaces[-1].rename(wsname)
self.tabs.addTab(self.workspaces[-1], wsname)
self.workspaceNames.append(wsname)
self.changeMainWindow(wsname)
def loadFromMenu(self):
fileList = QtWidgets.QFileDialog.getOpenFileNames(self, 'Open File', self.lastLocation)
if isinstance(fileList, tuple):
fileList = fileList[0]
self.loadData(fileList)
def loadData(self, fileList):
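        # Load every file in fileList via automatic format detection, falling back to the
        # ASCII import dialog when needed, and open a new workspace for each file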
for filePath in fileList:
if filePath: # if not cancelled
self.lastLocation = os.path.dirname(filePath) # Save used path
if not filePath:
return
masterData = io.autoLoad(filePath)
if masterData is None:
raise SsnakeException("Could not load data")
if masterData == -1:
dialog = AsciiLoadWindow(self, filePath)
if dialog.exec_():
if dialog.closed:
return
asciiInfo = (dialog.dataDimension, dialog.dataOrder, dialog.dataSpec, dialog.delim, dialog.sw, dialog.axisMulti)
masterData = io.autoLoad(filePath, [asciiInfo])
if self.defaultAskName:
name = self.askName(filePath, masterData.name)
if name is None:
return
else:
name = masterData.name
count = 0
while name in self.workspaceNames:
name = 'spectrum' + str(count)
count += 1
masterData.rename(name)
if masterData is not None:
self.workspaces.append(Main1DWindow(self, masterData))
self.tabs.addTab(self.workspaces[-1], name)
self.workspaceNames.append(name)
self.changeMainWindow(name)
def loadFitLibDir(self):
#fileName = QtWidgets.QFileDialog.getExistingDirectory(self, 'Open Library Directory', self.lastLocation)
fileName = QtWidgets.QFileDialog.getOpenFileName(self, 'Open Library File', self.lastLocation)
if isinstance(fileName, tuple):
fileName = fileName[0]
return fileName
def loadSIMPSONScript(self):
fileName = QtWidgets.QFileDialog.getOpenFileName(self, 'Open SIMPSON Script', self.lastLocation)
if isinstance(fileName, tuple):
fileName = fileName[0]
return fileName
def loadAndCombine(self, filePathList):
masterData = io.autoLoad(filePathList)
wsname = self.askName()
if wsname is None:
return
masterData.rename(wsname)
self.workspaces.append(Main1DWindow(self, masterData))
self.workspaces[-1].rename(wsname)
self.tabs.addTab(self.workspaces[-1], wsname)
self.workspaceNames.append(wsname)
self.changeMainWindow(wsname)
def dataFromFit(self, data, filePath, freq, sw, spec, wholeEcho, ref, xaxArray, axes):
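        # Create a new workspace from data generated by a fit window, after asking for a workspace name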
name = self.askName()
if name is None:
return
masterData = sc.Spectrum(data,
filePath,
freq,
sw,
spec,
wholeEcho,
ref,
xaxArray,
history=['Data obtained from fit'],
name=name)
masterData.resetXax(axes)
self.workspaces.append(Main1DWindow(self, masterData))
self.tabs.addTab(self.workspaces[-1], name)
self.workspaceNames.append(name)
self.changeMainWindow(name)
def saveSimpsonFile(self):
self.mainWindow.get_mainWindow().SaveSimpsonFile()
def saveASCIIFile(self):
self.mainWindow.get_mainWindow().saveASCIIFile()
def saveJSONFile(self):
self.mainWindow.get_mainWindow().saveJSONFile()
def saveMatlabFile(self):
self.mainWindow.get_mainWindow().saveMatlabFile()
def saveFigure(self):
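        # Swap the active tab for a SaveFigureWindow so the current plot can be exported;
        # the processing menus stay disabled while it is open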
if self.mainWindow is None:
return
self.allowChange = False
self.menuEnable(False, True)
num = self.workspaces.index(self.mainWindow)
self.mainWindow = SaveFigureWindow(self, self.mainWindow)
self.tabs.removeTab(num)
self.tabs.insertTab(num, self.mainWindow, self.workspaceNames[num])
self.workspaces[num] = self.mainWindow
self.tabs.setCurrentIndex(num)
self.menuCheck()
self.allowChange = True
def closeSaveFigure(self, mainWindow):
self.allowChange = False
num = self.workspaces.index(self.mainWindow)
self.tabs.removeTab(num)
self.mainWindow = mainWindow
self.workspaces[num] = self.mainWindow
self.tabs.insertTab(num, self.mainWindow, self.workspaceNames[num])
self.tabs.setCurrentIndex(num)
self.menuEnable(True, True)
self.tabs.setCurrentIndex(num)
self.menuCheck()
self.allowChange = True
def createFitWindow(self, fitWindow):
if self.mainWindow is None:
return
self.allowChange = False
self.menuEnable(False, True)
num = self.workspaces.index(self.mainWindow)
self.tabs.removeTab(num)
self.mainWindow = fitWindow
self.tabs.insertTab(num, self.mainWindow, self.workspaceNames[num])
self.workspaces[num] = self.mainWindow
self.tabs.setCurrentIndex(num)
self.menuCheck()
self.allowChange = True
def closeFitWindow(self, mainWindow):
self.allowChange = False
num = self.workspaces.index(self.mainWindow)
self.tabs.removeTab(num)
del self.mainWindow
self.mainWindow = mainWindow
self.workspaces[num] = self.mainWindow
self.tabs.insertTab(num, self.mainWindow, self.workspaceNames[num])
self.menuEnable(True, True)
self.tabs.setCurrentIndex(num)
self.menuCheck()
self.allowChange = True
def updateMenu(self):
UpdateWindow(self)
def createShiftConversionWindow(self):
shiftConversionWindow(self)
def createDipolarDistanceWindow(self):
dipolarDistanceWindow(self)
def createQuadConversionWindow(self):
quadConversionWindow(self)
def createMqmasExtractWindow(self):
mqmasExtractWindow(self)
def createTempcalWindow(self):
tempCalWindow(self)
def nmrTable(self):
import nmrTable
nmrTable.PeriodicTable()
def fileQuit(self):
self.close()
def closeEvent(self, event):
quit_msg = "Are you sure you want to close ssNake?"
close = True
if len(self.workspaces) != 0:
close = QtWidgets.QMessageBox.Yes == QtWidgets.QMessageBox.question(self, 'Close', quit_msg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if close:
for item in fit.stopDict.keys(): # Send stop commands to all threads
fit.stopDict[item] = True
event.accept()
else:
event.ignore()
######################################################################################################
class Main1DWindow(QtWidgets.QWidget):
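    # One workspace: couples the master data to the current view and to the
    # side, bottom and text frames surrounding the plot canvas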
def __init__(self, father, masterData, duplicateCurrent=None):
super(Main1DWindow, self).__init__(father)
self.fig = Figure()
self.canvas = FigureCanvas(self.fig)
grid = QtWidgets.QGridLayout(self)
grid.addWidget(self.canvas, 0, 0)
self.currentMacro = None
self.redoMacro = []
self.monitor = None # Monitor of files
self.monitorMacros = []
self.father = father
self.masterData = masterData
if duplicateCurrent is not None:
self.current = duplicateCurrent.copyCurrent(self, self.fig, self.canvas, masterData)
else:
self.current = views.Current1D(self, self.fig, self.canvas, masterData)
self.menubar = self.father.menubar
self.sideframe = SideFrame(self)
grid.addWidget(self.sideframe, 0, 1)
self.bottomframe = BottomFrame(self)
grid.addWidget(self.bottomframe, 1, 0, 1, 2)
self.textframe = TextFrame(self)
grid.addWidget(self.textframe, 2, 0, 1, 2)
grid.setColumnStretch(0, 1)
grid.setRowStretch(0, 1)
self.grid = grid
self.canvas.mpl_connect('button_press_event', self.buttonPress)
self.canvas.mpl_connect('button_release_event', self.buttonRelease)
self.canvas.mpl_connect('motion_notify_event', self.pan)
self.canvas.mpl_connect('scroll_event', self.scroll)
self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)
self.canvas.setFocus()
def rename(self, name):
self.current.rename(name)
def buttonPress(self, event):
self.current.buttonPress(event)
def buttonRelease(self, event):
self.current.buttonRelease(event)
def pan(self, event):
self.current.pan(event)
def scroll(self, event):
self.current.scroll(event)
def get_mainWindow(self):
return self
def get_masterData(self):
return self.masterData
def get_current(self):
return self.current
def kill(self):
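        # Explicitly delete the view, data, canvas and child frames so that all Qt widgets
        # are released and the workspace can be garbage collected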
self.current.kill()
del self.current
del self.masterData
del self.canvas
del self.fig # fig is destroyed
for i in reversed(range(self.grid.count())):
self.grid.itemAt(i).widget().deleteLater()
self.grid.deleteLater()
self.bottomframe.kill()
del self.bottomframe
self.textframe.kill()
del self.textframe
self.sideframe.kill()
del self.sideframe
self.deleteLater()
gc.collect()
def rescue(self):
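        # Fall back to a plain 1D view of the master data when the current view fails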
self.current.kill()
self.current = views.Current1D(self, self.current.fig, self.current.canvas, self.masterData)
def menuEnable(self, enable=True):
self.father.menuEnable(enable)
self.sideframe.frameEnable(enable)
self.bottomframe.frameEnable(enable)
self.textframe.frameEnable(enable)
if enable:
self.menuCheck()
def menuCheck(self):
self.father.menuCheck()
def runMacro(self, macro, display=True):
for i, _ in enumerate(macro):
iter1 = macro[i] # Do not loop over the macro list itself to prevent recursion if the running macro is also the one being recorded
self.addMacro(iter1)
try:
getattr(self.masterData, iter1[0])(*iter1[1])
except AttributeError:
raise SsnakeException('unknown macro command: ' + iter1[0])
if display:
self.current.upd() # get the first slice of data
self.current.showFid() # plot the data
self.current.plotReset() # reset the axes limits
self.updAllFrames()
self.menuCheck()
def addMacro(self, macroStep):
if self.currentMacro is not None:
self.father.macroAdd(self.currentMacro, macroStep)
self.redoMacro = []
def saveJSONFile(self):
WorkspaceName = self.father.workspaceNames[self.father.workspaceNum] # Set name of file to be saved to workspace name to start
name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', self.father.lastLocation + os.path.sep + WorkspaceName + '.json', 'JSON (*.json)')
if isinstance(name, tuple):
name = name[0]
if not name:
return
self.father.lastLocation = os.path.dirname(name) # Save used path
io.saveJSONFile(name, self.masterData)
def saveMatlabFile(self):
WorkspaceName = self.father.workspaceNames[self.father.workspaceNum] # Set name of file to be saved to workspace name to start
name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', self.father.lastLocation + os.path.sep + WorkspaceName + '.mat', 'MATLAB file (*.mat)')
if isinstance(name, tuple):
name = name[0]
if not name:
return
self.father.lastLocation = os.path.dirname(name) # Save used path
io.saveMatlabFile(name, self.masterData, self.father.workspaceNames[self.father.workspaceNum])
def SaveSimpsonFile(self):
if self.masterData.ndim() > 2:
raise SsnakeException('Saving to Simpson format only allowed for 1D and 2D data!')
WorkspaceName = self.father.workspaceNames[self.father.workspaceNum] # Set name of file to be saved to workspace name to start
if sum(self.masterData.spec) / len(self.masterData.spec) == 1:
name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', self.father.lastLocation + os.path.sep + WorkspaceName + '.spe', 'SIMPSON file (*.spe)')
if isinstance(name, tuple):
name = name[0]
if not name:
return
elif sum(self.masterData.spec) == 0:
name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', self.father.lastLocation + os.path.sep + WorkspaceName + '.fid', 'SIMPSON file (*.fid)')
if isinstance(name, tuple):
name = name[0]
if not name:
return
else:
raise SsnakeException('Saving to Simpson format not allowed for mixed time/frequency domain data!')
self.father.lastLocation = os.path.dirname(name) # Save used path
io.saveSimpsonFile(name, self.masterData)
def saveASCIIFile(self):
if self.masterData.ndim() > 2:
raise SsnakeException('Saving to ASCII format only allowed for 1D and 2D data!')
WorkspaceName = self.father.workspaceNames[self.father.workspaceNum] # Set name of file to be saved to workspace name to start
name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', self.father.lastLocation + os.path.sep + WorkspaceName + '.txt', 'ASCII file (*.txt)')
if isinstance(name, tuple):
name = name[0]
if not name:
return
self.father.lastLocation = os.path.dirname(name) # Save used path
axMult = self.current.getCurrentAxMult()
io.saveASCIIFile(name, self.masterData, axMult)
def reloadLast(self):
self.current.reload()
self.updAllFrames()
self.menuCheck()
gc.collect()
def monitorLoad(self, filePath, delay=0.5):
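        # File-watcher callback: reload the data from disk, replay the linked macros,
        # refresh the plot and re-arm the watcher after a short delay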
self.monitor.blockSignals(True)
if not os.path.exists(filePath):
self.stopMonitor()
return
loadData = io.autoLoad(*self.masterData.filePath)
self.masterData.restoreData(loadData, None)
for name in self.monitorMacros:
self.runMacro(self.father.macros[name], display=False)
self.current.upd()
# self.current.plotReset()
self.current.showFid()
self.updAllFrames()
self.menuCheck()
        QtCore.QTimer.singleShot(int(delay * 1000), lambda: self.monitor.blockSignals(False))
if filePath in self.monitor.files() or filePath in self.monitor.directories():
return
self.monitor.addPath(filePath)
def startMonitor(self, macroNames, delay=0.5):
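        # Watch the data files on disk and rerun the given macros whenever they change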
self.monitorMacros = macroNames
self.monitor = QtCore.QFileSystemWatcher(self.masterData.filePath[0], self)
self.monitor.fileChanged.connect(lambda a: self.monitorLoad(a, delay))
self.monitor.directoryChanged.connect(lambda a: self.monitorLoad(a, delay))
def stopMonitor(self):
self.monitorMacros = []
if self.monitor is not None:
for name in self.masterData.filePath[0]:
self.monitor.removePath(name)
self.monitor = None
def real(self):
self.current.real()
self.sideframe.upd()
self.menuCheck()
def imag(self):
self.current.imag()
self.sideframe.upd()
self.menuCheck()
def abs(self):
self.current.abs()
self.sideframe.upd()
self.menuCheck()
def conj(self):
self.current.conj()
self.sideframe.upd()
self.menuCheck()
def fourier(self):
self.current.complexFourier()
self.bottomframe.upd()
self.menuCheck()
def realFourier(self):
self.current.realFourier()
self.bottomframe.upd()
self.menuCheck()
def fftshift(self):
self.current.fftshift()
self.updAllFrames()
self.menuCheck()
def invFftshift(self):
self.current.fftshift(inv=True)
self.updAllFrames()
self.menuCheck()
def diff(self):
self.current.diff()
self.updAllFrames()
self.menuCheck()
def cumsum(self):
self.current.cumsum()
self.updAllFrames()
self.menuCheck()
def hilbert(self):
self.current.hilbert()
self.menuCheck()
def states(self):
self.current.states()
self.updAllFrames()
self.menuCheck()
def statesTPPI(self):
self.current.statesTPPI()
self.updAllFrames()
self.menuCheck()
def echoAntiEcho(self):
self.current.echoAntiEcho()
self.updAllFrames()
self.menuCheck()
def setFreq(self, freq, sw):
self.current.setFreq(freq, sw)
self.menuCheck()
def flipLR(self):
self.current.flipLR()
self.menuCheck()
def directAutoPhase(self, phaseNum):
self.current.directAutoPhase(phaseNum)
self.menuCheck()
def autoPhaseAll(self, phaseNum):
self.current.autoPhaseAll(phaseNum)
self.menuCheck()
def CorrectDigitalFilter(self):
if self.current.data.dFilter is None:
raise SsnakeException('Digital filter: no value defined')
self.current.correctDFilter()
self.menuCheck()
def createRelaxWindow(self):
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'relax'))
def createDiffusionWindow(self):
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'diffusion'))
def createPeakDeconvWindow(self):
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'peakdeconv'))
def createCsaDeconvWindow(self):
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'csadeconv'))
def createQuadDeconvWindow(self):
if self.current.freq() == 0.0:
raise SsnakeException("Please set the spectrometer frequency first!")
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'quaddeconv'))
def createQuadCSADeconvWindow(self):
if self.current.freq() == 0.0:
raise SsnakeException("Please set the spectrometer frequency first!")
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'quadcsadeconv'))
def createQuadCzjzekWindow(self):
if self.current.freq() == 0.0:
raise SsnakeException("Please set the spectrometer frequency first!")
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'quadczjzek'))
def createMQMASWindow(self):
if self.masterData.ndim() < 2:
raise SsnakeException("Data has not enough dimensions for MQMAS fitting")
if self.current.freq() == 0.0:
raise SsnakeException("Please set the spectrometer frequency first!")
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'mqmas'))
def createMQMASCzjzekWindow(self):
if self.masterData.ndim() < 2:
raise SsnakeException("Data has not enough dimensions for MQMAS fitting")
if self.current.freq() == 0.0:
raise SsnakeException("Please set the spectrometer frequency first!")
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'mqmasczjzek'))
def createExternalFitWindow(self):
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'external'))
def createFunctionFitWindow(self):
self.father.createFitWindow(fit.TabFittingWindow(self.father, self.father.mainWindow, 'function'))
def plot1D(self):
tmpcurrent = views.Current1D(self, self.fig, self.canvas, self.masterData, self.current)
self.current.kill()
del self.current
self.current = tmpcurrent
self.updAllFrames()
self.menuCheck()
def plotScatter(self):
tmpcurrent = views.CurrentScatter(self, self.fig, self.canvas, self.masterData, self.current)
self.current.kill()
del self.current
self.current = tmpcurrent
self.updAllFrames()
self.menuCheck()
def plotStack(self):
if len(self.masterData.shape()) < 2:
raise SsnakeException("Data does not have enough dimensions")
tmpcurrent = views.CurrentStacked(self, self.fig, self.canvas, self.masterData, self.current)
self.current.kill()
del self.current
self.current = tmpcurrent
self.updAllFrames()
self.menuCheck()
def plotArray(self):
if len(self.masterData.shape()) < 2:
raise SsnakeException("Data does not have enough dimensions")
tmpcurrent = views.CurrentArrayed(self, self.fig, self.canvas, self.masterData, self.current)
self.current.kill()
del self.current
self.current = tmpcurrent
self.updAllFrames()
self.menuCheck()
def plotContour(self):
if len(self.masterData.shape()) < 2:
raise SsnakeException("Data does not have enough dimensions")
tmpcurrent = views.CurrentContour(self, self.fig, self.canvas, self.masterData, self.current)
self.current.kill()
del self.current
self.current = tmpcurrent
self.updAllFrames()
self.menuCheck()
def plotMultiContour(self):
if len(self.masterData.shape()) < 2:
raise SsnakeException("Data does not have enough dimensions")
tmpcurrent = views.CurrentMultiContour(self, self.fig, self.canvas, self.masterData, self.current)
self.current.kill()
del self.current
self.current = tmpcurrent
self.updAllFrames()
self.menuCheck()
def plotColour2D(self):
if len(self.masterData.shape()) < 2:
raise SsnakeException("Data does not have enough dimensions")
tmpcurrent = views.CurrentColour2D(self, self.fig, self.canvas, self.masterData, self.current)
self.current.kill()
del self.current
self.current = tmpcurrent
self.updAllFrames()
self.menuCheck()
def plotMulti(self):
tmpcurrent = views.CurrentMulti(self, self.fig, self.canvas, self.masterData, self.current)
self.current.kill()
del self.current
self.current = tmpcurrent
self.updAllFrames()
self.menuCheck()
def updAllFrames(self):
self.sideframe.upd()
self.bottomframe.upd()
self.textframe.upd()
def undo(self, *args):
self.father.dispMsg(self.masterData.undo())
self.current.upd()
self.current.showFid()
self.current.plotReset()
self.updAllFrames()
if self.currentMacro is not None:
self.redoMacro.append(self.father.macros[self.currentMacro].pop())
self.menuCheck()
def redo(self, *args):
self.masterData.redo()
self.current.upd()
self.current.showFid()
self.current.plotReset()
self.updAllFrames()
if self.currentMacro is not None:
self.father.macroAdd(self.currentMacro, self.redoMacro.pop())
self.menuCheck()
def clearUndo(self):
self.masterData.clearUndo()
self.menuCheck()
########################################################################################
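# The SideFrame holds the per-dimension controls to the right of the plot:
# radio buttons to choose the displayed dimension(s), slice spin boxes,
# stack/array display ranges, contour settings, and the controls for any
# additional spectra shown in a multi plot.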
class SideFrame(QtWidgets.QScrollArea):
FITTING = False
def __init__(self, parent):
super(SideFrame, self).__init__(parent)
self.father = parent
self.entries = []
self.plotIs2D = False
content = QtWidgets.QWidget()
grid = QtWidgets.QGridLayout(content)
grid.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
frame1Widget = QtWidgets.QWidget()
frame2Widget = QtWidgets.QWidget()
grid.addWidget(frame1Widget, 0, 0)
grid.addWidget(frame2Widget, 1, 0)
self.frame1 = QtWidgets.QGridLayout()
self.frame2 = QtWidgets.QGridLayout()
frame1Widget.setLayout(self.frame1)
frame2Widget.setLayout(self.frame2)
self.frame1.setAlignment(QtCore.Qt.AlignTop)
self.frame2.setAlignment(QtCore.Qt.AlignTop)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.grid = grid
self.setWidget(content)
self.upd()
def kill(self):
for i in reversed(range(self.grid.count())):
self.grid.itemAt(i).widget().deleteLater()
self.grid.deleteLater()
def frameEnable(self, enable=True):
self.setEnabled(enable)
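    # Rebuild the entire side frame to reflect the current view: one slice
    # spin box (plus dimension radio buttons) per data dimension, the
    # from/to/step/spacing controls for stacked and arrayed plots, the
    # contour/projection/diagonal groups for contour plots, and the
    # per-spectrum entries of a multi plot.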
def upd(self):
current = self.father.current
self.shape = current.data.shape()
self.length = len(self.shape)
for i in reversed(range(self.frame1.count())):
item = self.frame1.itemAt(i).widget()
if self.FITTING:
item.hide()
self.frame1.removeWidget(item)
item.deleteLater()
for i in reversed(range(self.frame2.count())):
item = self.frame2.itemAt(i).widget()
if self.FITTING:
item.hide()
self.frame2.removeWidget(item)
item.deleteLater()
offset = 0
self.plotIs2D = isinstance(current, views.CurrentStacked)
if self.plotIs2D:
offset = 1
self.entries = []
self.buttons1 = []
self.buttons1Group = QtWidgets.QButtonGroup(self)
self.buttons1Group.buttonClicked.connect(lambda: self.setAxes(True))
self.buttons2 = []
self.buttons2Group = QtWidgets.QButtonGroup(self)
self.buttons2Group.buttonClicked.connect(lambda: self.setAxes(False))
if self.length > 1:
for num in range(self.length):
if not self.FITTING:
self.buttons1.append(QtWidgets.QRadioButton(''))
self.buttons1Group.addButton(self.buttons1[num], num)
self.buttons1[num].setToolTip(TOOLTIPS['sideframeDimension1'])
self.frame1.addWidget(self.buttons1[num], num * 2 + 1, 0)
if self.plotIs2D:
self.buttons2.append(QtWidgets.QRadioButton(''))
self.buttons2Group.addButton(self.buttons2[num], num)
self.buttons2[num].setToolTip(TOOLTIPS['sideframeDimension2'])
self.frame1.addWidget(self.buttons2[num], num * 2 + 1, 1)
if current.isComplex(num):
tmpLabel = "*D"
else:
tmpLabel = "D"
self.frame1.addWidget(wc.QLabel(tmpLabel + str(num + 1), self), num * 2, 1 + offset)
self.entries.append(wc.SliceSpinBox(self, 0, self.shape[num] - 1))
self.entries[-1].setToolTip(TOOLTIPS['sideFrameDimensionSlice'])
self.frame1.addWidget(self.entries[num], num * 2 + 1, 1 + offset)
                self.entries[num].setValue(current.locList[num])
if self.FITTING and num in current.axes:
self.entries[num].setDisabled(True)
self.entries[num].valueChanged.connect(lambda event, num=num: self.getSlice(num))
if type(current) in (views.CurrentStacked, views.CurrentArrayed):
if current.viewSettings["stackBegin"] is not None:
from2D = current.viewSettings["stackBegin"]
else:
from2D = 0
if current.viewSettings["stackEnd"] is not None:
to2D = current.viewSettings["stackEnd"]
else:
to2D = self.shape[current.axes[-2]]
if current.viewSettings["stackStep"] is not None:
step2D = current.viewSettings["stackStep"]
else:
step2D = 1
self.frame2.addWidget(wc.QLabel("From", self), 1, 0)
self.fromSpin = wc.SliceSpinBox(self, 0, to2D - 1)
self.fromSpin.setToolTip(TOOLTIPS['sideFrom'])
self.frame2.addWidget(self.fromSpin, 2, 0)
self.fromSpin.setValue(from2D)
self.fromSpin.valueChanged.connect(self.setToFrom)
self.frame2.addWidget(wc.QLabel("To", self), 3, 0)
self.toSpin = wc.SliceSpinBox(self, from2D + 1, self.shape[current.axes[-2]])
self.toSpin.setToolTip(TOOLTIPS['sideTo'])
self.frame2.addWidget(self.toSpin, 4, 0)
self.toSpin.setValue(to2D)
self.toSpin.valueChanged.connect(self.setToFrom)
self.frame2.addWidget(wc.QLabel("Step", self), 5, 0)
self.stepSpin = wc.SliceSpinBox(self, 1, self.shape[current.axes[-2]])
self.stepSpin.setToolTip(TOOLTIPS['stackStep'])
self.frame2.addWidget(self.stepSpin, 6, 0)
self.stepSpin.setValue(step2D)
self.stepSpin.valueChanged.connect(self.setToFrom)
self.frame2.addWidget(wc.QLabel("Spacing", self), 7, 0)
self.spacingEntry = QtWidgets.QLineEdit(self)
self.spacingEntry.setToolTip(TOOLTIPS['stackSpacing'])
self.spacingEntry.setText('%#.3g' % current.viewSettings["spacing"])
self.spacingEntry.returnPressed.connect(self.setSpacing)
self.frame2.addWidget(self.spacingEntry, 8, 0)
if isinstance(current, (views.CurrentContour)):
if type(current) in (views.CurrentContour, views.CurrentMultiContour):
self.contourTypeGroup = QtWidgets.QGroupBox('Contour type:')
self.contourTypeFrame = QtWidgets.QGridLayout()
self.contourNumberLabel = wc.QLeftLabel("Number:", self)
self.contourTypeFrame.addWidget(self.contourNumberLabel, 0, 0)
self.numLEntry = wc.SsnakeSpinBox()
self.numLEntry.setMaximum(100000)
self.numLEntry.setMinimum(1)
self.numLEntry.setToolTip(TOOLTIPS['contourNumber'])
self.numLEntry.setValue(current.viewSettings["numLevels"])
self.numLEntry.valueChanged.connect(self.setContour)
self.contourTypeFrame.addWidget(self.numLEntry, 0, 1)
self.contourTypeFrame.addWidget(wc.QLeftLabel("Sign:", self), 1, 0)
self.contourSignEntry = QtWidgets.QComboBox()
self.contourSignEntry.setToolTip(TOOLTIPS['contourSign'])
self.contourSignEntry.addItems(['Both', '+ only', '- only'])
self.contourSignEntry.setCurrentIndex(current.viewSettings["contourSign"])
self.contourSignEntry.currentIndexChanged.connect(self.setContour)
self.contourTypeFrame.addWidget(self.contourSignEntry, 1, 1)
self.contourTypeLabel = wc.QLeftLabel("Type:", self)
self.contourTypeFrame.addWidget(self.contourTypeLabel, 2, 0)
self.contourTypeEntry = QtWidgets.QComboBox()
self.contourTypeEntry.setToolTip(TOOLTIPS['contourType'])
self.contourTypeEntry.addItems(['Linear', 'Multiplier'])
self.contourTypeEntry.setCurrentIndex(current.viewSettings["contourType"])
self.contourTypeEntry.currentIndexChanged.connect(self.setContour)
self.contourTypeFrame.addWidget(self.contourTypeEntry, 2, 1)
self.multiValueLabel = wc.QLeftLabel("Multiplier:", self)
self.contourTypeFrame.addWidget(self.multiValueLabel, 3, 0)
self.multiValue = wc.QLineEdit(current.viewSettings["multiValue"], self.setContour)
self.multiValue.setToolTip(TOOLTIPS['contourMultiplier'])
self.multiValue.setMaximumWidth(120)
self.contourTypeFrame.addWidget(self.multiValue, 3, 1)
if current.viewSettings["contourType"] != 1:
self.multiValueLabel.hide()
self.multiValue.hide()
self.contourTypeGroup.setLayout(self.contourTypeFrame)
self.frame2.addWidget(self.contourTypeGroup, 6, 0, 1, 3)
# Contour limits
self.contourLimitsGroup = QtWidgets.QGroupBox('Contour limits [%]:')
self.contourLimitsFrame = QtWidgets.QGridLayout()
self.maxLEntry = wc.QLineEdit(format(current.viewSettings["maxLevels"] * 100.0, '.7g'), self.setContour)
self.maxLEntry.setMaximumWidth(120)
self.maxLEntry.setToolTip(TOOLTIPS['contourMax'])
self.contourLimitsFrame.addWidget(self.maxLEntry, 1, 1)
self.minLEntry = wc.QLineEdit(format(current.viewSettings["minLevels"] * 100.0, '.7g'), self.setContour)
self.minLEntry.setMaximumWidth(120)
self.minLEntry.setToolTip(TOOLTIPS['contourMin'])
self.contourLimitsFrame.addWidget(self.minLEntry, 2, 1)
self.contourLimType = QtWidgets.QComboBox()
self.contourLimType.addItems(['Current 2D', 'Full data'])
self.contourLimType.setCurrentIndex(current.viewSettings["limitType"])
self.contourLimType.setToolTip(TOOLTIPS['contourLimType'])
self.contourLimType.currentIndexChanged.connect(self.setContour)
self.contourLimitsFrame.addWidget(self.contourLimType, 0, 1)
self.maxLabel = wc.QLeftLabel("Max:", self)
self.minLabel = wc.QLeftLabel("Min:", self)
self.relLabel = wc.QLeftLabel("Rel. to:", self)
self.contourLimitsFrame.addWidget(self.relLabel, 0, 0)
self.contourLimitsFrame.addWidget(self.maxLabel, 1, 0)
self.contourLimitsFrame.addWidget(self.minLabel, 2, 0)
self.contourLimitsGroup.setLayout(self.contourLimitsFrame)
self.frame2.addWidget(self.contourLimitsGroup, 7, 0, 1, 3)
# Projections
self.contourProjGroup = QtWidgets.QGroupBox('Projections:')
self.contourProjFrame = QtWidgets.QGridLayout()
self.projTopLabel = wc.QLeftLabel("Top:", self)
self.contourProjFrame.addWidget(self.projTopLabel, 0, 0)
self.projDropTop = QtWidgets.QComboBox()
self.projDropTop.setToolTip(TOOLTIPS['contourTopProjection'])
self.projDropTop.addItems(["Sum", "Max", "Min", "Off", "Slice", "Diagonal"])
self.projDropTop.setCurrentIndex(current.viewSettings["projTop"])
self.projDropTop.activated.connect(lambda val, self=self: self.changeProj(val, 1))
self.contourProjFrame.addWidget(self.projDropTop, 0, 1)
self.projTraceTop = wc.SsnakeSpinBox()
self.projTraceTop.setMaximum(self.shape[current.axes[-2]] - 1)
self.projTraceTop.setMinimum(0)
self.projTraceTop.setValue(current.viewSettings["projPos"][0])
self.projTraceTop.valueChanged.connect(lambda val, self=self: self.changeTrace(val, 0))
self.projTraceTop.setToolTip(TOOLTIPS['contourProjTopTrac'])
self.contourProjFrame.addWidget(self.projTraceTop, 1, 1)
if current.viewSettings["projTop"] != 4:
self.projTraceTop.hide()
self.projRightLabel = wc.QLeftLabel("Right:", self)
self.contourProjFrame.addWidget(self.projRightLabel, 2, 0)
self.projDropRight = QtWidgets.QComboBox()
self.projDropRight.setToolTip(TOOLTIPS['contourRightProjection'])
self.projDropRight.addItems(["Sum", "Max", "Min", "Off", "Slice", "Diagonal"])
self.projDropRight.setCurrentIndex(current.viewSettings["projRight"])
self.projDropRight.activated.connect(lambda val, self=self: self.changeProj(val, 2))
self.contourProjFrame.addWidget(self.projDropRight, 2, 1)
self.projTraceRight = wc.SsnakeSpinBox()
self.projTraceRight.setMaximum(self.shape[current.axes[-1]] - 1)
self.projTraceRight.setMinimum(0)
self.projTraceRight.setValue(current.viewSettings["projPos"][1])
self.projTraceRight.valueChanged.connect(lambda val, self=self: self.changeTrace(val, 1))
self.projTraceRight.setToolTip(TOOLTIPS['contourProjRightTrac'])
self.contourProjFrame.addWidget(self.projTraceRight, 3, 1)
if current.viewSettings["projRight"] != 4:
self.projTraceRight.hide()
self.selectTraceButton = QtWidgets.QPushButton("Select slices", self)
self.selectTraceButton.clicked.connect(self.selectTraces)
self.contourProjFrame.addWidget(self.selectTraceButton, 4, 1)
if (current.viewSettings["projTop"] != 4) and (current.viewSettings["projRight"] != 4):
self.selectTraceButton.hide()
# Ranges
self.rangeCheckbox = QtWidgets.QCheckBox('Projection ranges', self)
self.rangeCheckbox.setChecked(current.viewSettings["projLimitsBool"])
self.rangeCheckbox.stateChanged.connect(self.activateRanges)
self.rangeCheckbox.setToolTip(TOOLTIPS['contourProjRanges'])
self.contourProjFrame.addWidget(self.rangeCheckbox, 5, 0, 1, 2)
self.projTopRangeMaxLabel = wc.QLeftLabel("Top max:", self)
self.projTopRangeMaxLabel.hide()
self.contourProjFrame.addWidget(self.projTopRangeMaxLabel, 6, 0)
self.projTopRangeMax = wc.SsnakeSpinBox()
self.projTopRangeMax.setMaximum(self.shape[current.axes[-2]] - 1)
self.projTopRangeMax.setMinimum(0)
self.projTopRangeMax.setToolTip(TOOLTIPS['contourTopRangeMax'])
if current.viewSettings["projLimits"][0] is None:
self.projTopRangeMax.setValue(self.shape[current.axes[-2]] - 1)
else:
self.projTopRangeMax.setValue(current.viewSettings["projLimits"][0])
self.projTopRangeMax.valueChanged.connect(self.changeRanges)
self.projTopRangeMax.hide()
self.contourProjFrame.addWidget(self.projTopRangeMax, 6, 1)
self.projTopRangeMinLabel = wc.QLeftLabel("Top min:", self)
self.projTopRangeMinLabel.hide()
self.contourProjFrame.addWidget(self.projTopRangeMinLabel, 7, 0)
self.projTopRangeMin = wc.SsnakeSpinBox()
self.projTopRangeMin.setMaximum(self.shape[current.axes[-2]] - 1)
self.projTopRangeMin.setMinimum(0)
self.projTopRangeMin.setToolTip(TOOLTIPS['contourTopRangeMin'])
if current.viewSettings["projLimits"][1] is None:
self.projTopRangeMin.setValue(0)
else:
self.projTopRangeMin.setValue(current.viewSettings["projLimits"][1])
self.projTopRangeMin.valueChanged.connect(self.changeRanges)
self.projTopRangeMin.hide()
self.contourProjFrame.addWidget(self.projTopRangeMin, 7, 1)
self.projRightRangeMaxLabel = wc.QLeftLabel("Right max:", self)
self.projRightRangeMaxLabel.hide()
self.contourProjFrame.addWidget(self.projRightRangeMaxLabel, 8, 0)
self.projRightRangeMax = wc.SsnakeSpinBox()
self.projRightRangeMax.setMaximum(self.shape[current.axes[-1]] - 1)
self.projRightRangeMax.setMinimum(0)
self.projRightRangeMax.setToolTip(TOOLTIPS['contourRightRangeMax'])
if current.viewSettings["projLimits"][2] is None:
self.projRightRangeMax.setValue(self.shape[current.axes[-1]] - 1)
else:
self.projRightRangeMax.setValue(current.viewSettings["projLimits"][2])
self.projRightRangeMax.valueChanged.connect(self.changeRanges)
self.projRightRangeMax.hide()
self.contourProjFrame.addWidget(self.projRightRangeMax, 8, 1)
self.projRightRangeMinLabel = wc.QLeftLabel("Right min:", self)
self.contourProjFrame.addWidget(self.projRightRangeMinLabel, 9, 0)
self.projRightRangeMinLabel.hide()
self.projRightRangeMin = wc.SsnakeSpinBox()
self.projRightRangeMin.setMaximum(self.shape[current.axes[-1]] - 1)
self.projRightRangeMin.setMinimum(0)
self.projRightRangeMin.setToolTip(TOOLTIPS['contourRightRangeMin'])
if current.viewSettings["projLimits"][3] is None:
self.projRightRangeMin.setValue(0)
else:
self.projRightRangeMin.setValue(current.viewSettings["projLimits"][3])
self.projRightRangeMin.valueChanged.connect(self.changeRanges)
self.projRightRangeMin.hide()
self.contourProjFrame.addWidget(self.projRightRangeMin, 9, 1)
self.contourProjGroup.setLayout(self.contourProjFrame)
self.frame2.addWidget(self.contourProjGroup, 9, 0, 1, 3)
self.activateRanges(self.rangeCheckbox.checkState())
# Diagonal group
self.diagonalGroup = QtWidgets.QGroupBox('Diagonal:')
self.diagonalGroup.setCheckable(True)
self.diagonalGroup.setChecked(current.viewSettings["diagonalBool"])
self.diagonalGroup.toggled.connect(self.switchDiagonal)
self.diagonalGroup.setToolTip(TOOLTIPS['contourDiagonal'])
self.diagonalFrame = QtWidgets.QGridLayout()
self.diagMultiLabel = wc.QLeftLabel("Multiplier:", self)
self.diagonalFrame.addWidget(self.diagMultiLabel, 0, 0)
self.diagonalEntry = wc.QLineEdit(current.viewSettings["diagonalMult"], self.setDiagonal)
self.diagonalEntry.setMaximumWidth(120)
self.diagonalEntry.setToolTip(TOOLTIPS['contourDiagonalMulti'])
self.diagonalFrame.addWidget(self.diagonalEntry, 0, 1)
self.diagonalGroup.setLayout(self.diagonalFrame)
self.frame2.addWidget(self.diagonalGroup, 10, 0, 1, 3)
if not self.FITTING:
self.buttons1Group.button(current.axes[-1]).toggle()
if self.plotIs2D:
self.buttons2Group.button(current.axes[-2]).toggle()
if isinstance(current, (views.CurrentMulti, views.CurrentMultiContour)):
self.extraEntries = []
self.extraButtons1 = []
self.extraButtons1Group = []
self.extraButtons2 = []
self.extraButtons2Group = []
self.nameLabels = []
iter1 = 0
for i in range(len(current.viewSettings["extraData"])):
frameWidget = QtWidgets.QWidget(self)
frame = QtWidgets.QGridLayout(frameWidget)
self.frame2.addWidget(frameWidget, iter1, 0)
frameWidget.setLayout(frame)
name = current.viewSettings["extraName"][i]
if len(name) > 20:
name = name[:20]
self.nameLabels.append(wc.QLabel(name, self))
frame.addWidget(self.nameLabels[i], 0, 0, 1, 3)
self.nameLabels[i].setStyleSheet("QLabel { color: rgb" + str(current.getExtraColor(i)) + ";}")
colorbutton = QtWidgets.QPushButton("Colour", self)
colorbutton.clicked.connect(lambda arg, num=i: self.setExtraColor(num))
colorbutton.setToolTip(TOOLTIPS['multiplotColour'])
frame.addWidget(colorbutton, 1, 0)
button = QtWidgets.QPushButton("x", self)
button.clicked.connect(lambda arg, num=i: self.delMultiSpec(num))
button.setToolTip(TOOLTIPS['multiplotX'])
if isinstance(current, (views.CurrentMulti)):
frame.addWidget(button, 1, 1)
self.OOM = self.father.current.getOOM() # Order of Magnitude
self.scaleLabel = wc.QLeftLabel("Scale:", self)
frame.addWidget(self.scaleLabel, 2, 0)
self.offsetLabel = wc.QLeftLabel(u"Offset (×1e" + str(self.OOM) + "):", self)
frame.addWidget(self.offsetLabel, 3, 0)
self.shiftLabel = wc.QLeftLabel("Shift:", self)
frame.addWidget(self.shiftLabel, 4, 0)
scaleEntry = wc.SsnakeDoubleSpinBox()
scaleEntry.setDecimals(4)
scaleEntry.setMaximum(1e3)
scaleEntry.setMinimum(-1e3)
scaleEntry.setSingleStep(0.1)
scaleEntry.setValue(self.father.current.viewSettings["extraScale"][i])
scaleEntry.valueChanged.connect(lambda arg, num=i: self.setScale(arg, num))
scaleEntry.setToolTip(TOOLTIPS['multiplotScale'])
frame.addWidget(scaleEntry, 2, 1)
offsetEntry = wc.SsnakeDoubleSpinBox()
offsetEntry.setDecimals(4)
offsetEntry.setMaximum(1e3)
offsetEntry.setMinimum(-1e3)
offsetEntry.setSingleStep(0.1)
offsetEntry.setValue(self.father.current.viewSettings["extraOffset"][i] / (10**self.OOM))
offsetEntry.valueChanged.connect(lambda arg, num=i: self.setOffset(arg, num))
offsetEntry.setToolTip(TOOLTIPS['multiplotOffset'])
frame.addWidget(offsetEntry, 3, 1)
shiftEntry = wc.SsnakeDoubleSpinBox()
shiftEntry.setDecimals(4)
shiftEntry.setMaximum(1e3)
shiftEntry.setMinimum(-1e3)
shiftEntry.setSingleStep(0.1)
shiftEntry.setValue(self.father.current.viewSettings["extraShift"][i])
shiftEntry.valueChanged.connect(lambda arg, num=i: self.setShift(arg, num))
shiftEntry.setToolTip(TOOLTIPS['multiplotShift1'])
frame.addWidget(shiftEntry, 4, 1)
elif isinstance(current, (views.CurrentMultiContour)):
frame.addWidget(button, 1, 1, 1, 2)
self.OOM = self.father.current.getOOM() # Order of Magnitude
self.scaleLabel = wc.QLeftLabel("Scale:", self)
frame.addWidget(self.scaleLabel, 2, 0)
self.shift1Label = wc.QLeftLabel("x Shift:", self)
frame.addWidget(self.shift1Label, 3, 0)
self.shift2Label = wc.QLeftLabel("y Shift:", self)
frame.addWidget(self.shift2Label, 4, 0)
scaleEntry = wc.SsnakeDoubleSpinBox()
scaleEntry.setDecimals(4)
scaleEntry.setMaximum(1e3)
scaleEntry.setMinimum(-1e3)
scaleEntry.setSingleStep(0.1)
scaleEntry.setValue(self.father.current.viewSettings["extraScale"][i])
scaleEntry.valueChanged.connect(lambda arg, num=i: self.setScale(arg, num))
scaleEntry.setToolTip(TOOLTIPS['multiplotScale'])
frame.addWidget(scaleEntry, 2, 1, 1, 2)
shiftEntry = wc.SsnakeDoubleSpinBox()
shiftEntry.setDecimals(4)
shiftEntry.setMaximum(1e3)
shiftEntry.setMinimum(-1e3)
shiftEntry.setSingleStep(0.1)
shiftEntry.setValue(self.father.current.viewSettings["extraShift"][i])
shiftEntry.valueChanged.connect(lambda arg, num=i: self.setShift(arg, num))
shiftEntry.setToolTip(TOOLTIPS['multiplotShift1'])
frame.addWidget(shiftEntry, 3, 1, 1, 2)
shiftEntry = wc.SsnakeDoubleSpinBox()
shiftEntry.setDecimals(4)
shiftEntry.setMaximum(1e3)
shiftEntry.setMinimum(-1e3)
shiftEntry.setSingleStep(0.1)
shiftEntry.setValue(self.father.current.viewSettings["extraShift2"][i])
shiftEntry.valueChanged.connect(lambda arg, num=i: self.setShift2(arg, num))
shiftEntry.setToolTip(TOOLTIPS['multiplotShift2'])
frame.addWidget(shiftEntry, 4, 1, 1, 2)
entries = []
self.extraEntries.append(entries)
buttons1 = []
self.extraButtons1.append(buttons1)
self.extraButtons1Group.append(QtWidgets.QButtonGroup(self))
self.extraButtons1Group[i].buttonClicked.connect(lambda: self.setExtraAxes(True))
buttons2 = []
                self.extraButtons2.append(buttons2)
self.extraButtons2Group.append(QtWidgets.QButtonGroup(self))
self.extraButtons2Group[i].buttonClicked.connect(lambda: self.setExtraAxes(False))
if current.viewSettings["extraData"][i].ndim() > 1:
for num in range(current.viewSettings["extraData"][i].ndim()):
offset = 0
buttons1.append(QtWidgets.QRadioButton(''))
buttons1[-1].setToolTip(TOOLTIPS['multiplotDim1'])
self.extraButtons1Group[i].addButton(buttons1[num], num)
frame.addWidget(buttons1[num], num * 3 + 6, 0)
if self.plotIs2D:
offset = 1
buttons2.append(QtWidgets.QRadioButton(''))
buttons2[-1].setToolTip(TOOLTIPS['multiplotDim2'])
self.extraButtons2Group[i].addButton(buttons2[num], num)
frame.addWidget(buttons2[num], num * 3 + 6, 1)
frame.addWidget(wc.QLabel("D" + str(num + 1), self), num * 3 + 5, 1 + offset)
entries.append(wc.SliceSpinBox(self, 0, current.viewSettings["extraData"][i].shape()[num] - 1))
entries[-1].setToolTip(TOOLTIPS['sideFrameDimensionSlice'])
frame.addWidget(entries[num], num * 3 + 6, 1 + offset)
entries[num].setValue(current.viewSettings["extraLoc"][i][num])
entries[num].valueChanged.connect(lambda event, num=num, i=i: self.getExtraSlice(num, i))
self.extraButtons1Group[i].button(current.viewSettings["extraAxes"][i][-1]).toggle()
if self.plotIs2D:
self.extraButtons2Group[i].button(current.viewSettings["extraAxes"][i][-2]).toggle()
iter1 += 1
addButton = QtWidgets.QPushButton("Add plot", self)
addButton.setToolTip(TOOLTIPS['multiplotAddPlot'])
addButton.clicked.connect(self.addMultiSpec)
self.frame2.addWidget(addButton, iter1, 0, 1, 2)
QtCore.QTimer.singleShot(100, self.resizeAll)
def resizeAll(self):
self.setMinimumWidth(self.grid.sizeHint().width() + self.verticalScrollBar().sizeHint().width())
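    # Push the from/to/step selection to a stacked or arrayed view and keep
    # the spin box limits consistent so that 'from' always stays below 'to'.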
def setToFrom(self, *args):
current = self.father.current
if not (type(current) is views.CurrentStacked or type(current) is views.CurrentArrayed):
return
fromVar = self.fromSpin.value()
toVar = self.toSpin.value()
stepVar = self.stepSpin.value()
current.stackSelect(fromVar, toVar, stepVar)
self.fromSpin.setMaximum(toVar - 1)
self.toSpin.setMinimum(fromVar + 1)
def scrollSpacing(self, var):
self.spacingEntry.setText('%#.3g' % var)
    def setSpacing(self, *args):
        var = safeEval(self.spacingEntry.text(), length=self.father.current.len(), Type='FI')
        if var is None:
            self.father.father.dispMsg('Invalid value for spacing')
            return
        self.spacingEntry.setText('%#.3g' % var)
        self.father.current.setSpacing(var)
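    # Read and validate the contour settings (number of levels, sign, type,
    # limits and multiplier); invalid entries fall back to the values stored
    # in the current view and a warning message is shown.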
def setContour(self, *args):
var1 = self.numLEntry.value()
maxC = safeEval(self.maxLEntry.text(), length=self.father.current.len(), Type='FI')
if maxC is None:
maxC = self.father.current.viewSettings["maxLevels"] * 100
self.father.father.dispMsg('Invalid value for contour maximum')
else:
maxC = abs(float(maxC))
minC = safeEval(self.minLEntry.text(), length=self.father.current.len(), Type='FI')
if minC is None:
minC = self.father.current.viewSettings["minLevels"] * 100
self.father.father.dispMsg('Invalid value for contour minimum')
else:
minC = abs(float(minC))
if minC > maxC: # if wrong order, interchange
maxC, minC = (minC, maxC)
self.maxLEntry.setText(str(maxC))
self.minLEntry.setText(str(minC))
cSign = self.contourSignEntry.currentIndex()
cType = self.contourTypeEntry.currentIndex()
if cType == 0:
self.multiValue.hide()
self.multiValueLabel.hide()
else:
self.multiValue.show()
self.multiValueLabel.show()
multi = safeEval(self.multiValue.text(), length=self.father.current.len(), Type='FI')
if multi is None or multi <= 1.0:
multi = self.father.current.viewSettings["multiValue"]
self.father.father.dispMsg('Invalid value for contour multiplier')
else:
multi = abs(float(multi))
self.multiValue.setText(str(multi))
limitType = self.contourLimType.currentIndex()
self.father.current.setLevels(var1, maxC / 100.0, minC / 100.0, limitType, cSign, cType, multi)
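    # Change the projection type of the top (direc=1) or right (direc=2)
    # projection; the trace spin box and the slice-selection button are only
    # shown when the 'Slice' option (index 4) is selected.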
def changeProj(self, pType, direc):
if pType == 4:
if direc == 1:
self.projTraceTop.show()
else:
self.projTraceRight.show()
else:
self.selectTraceButton.hide()
if direc == 1:
self.projTraceTop.hide()
else:
self.projTraceRight.hide()
self.father.current.setProjType(pType, direc)
if (self.father.current.viewSettings["projTop"] == 4) or (self.father.current.viewSettings["projRight"] == 4):
self.selectTraceButton.show()
else:
self.selectTraceButton.hide()
# if not self.FITTING:
# self.father.current.clearProj()
# self.father.current.showAllProj()
# else:
# self.father.current.showFid()
def changeTrace(self, num, direc):
self.father.current.setProjTraces(num, direc)
# if not self.FITTING:
# self.father.current.clearProj()
# self.father.current.showAllProj()
# else:
# self.father.current.showFid()
def selectTraces(self, *args):
self.father.current.peakPickFunc = lambda pos, self=self: self.pickedTraces(pos)
self.father.current.peakPick = 3
def pickedTraces(self, pos):
if self.father.current.viewSettings["projTop"] == 4 and pos[3] != self.projTraceTop.value():
self.projTraceTop.setValue(pos[3])
if self.father.current.viewSettings["projRight"] == 4 and pos[3] != self.projTraceRight.value():
self.projTraceRight.setValue(pos[0])
def changeRanges(self):
check = self.rangeCheckbox.isChecked()
ranges = [self.projTopRangeMax.value(), self.projTopRangeMin.value(), self.projRightRangeMax.value(), self.projRightRangeMin.value()]
self.father.current.setProjLimits(check, ranges)
# if not self.FITTING:
# self.father.current.clearProj()
# self.father.current.showAllProj()
# else:
# self.father.current.showFid()
def activateRanges(self, state):
if state:
self.projTopRangeMaxLabel.show()
self.projTopRangeMax.show()
self.projTopRangeMinLabel.show()
self.projTopRangeMin.show()
self.projRightRangeMaxLabel.show()
self.projRightRangeMax.show()
self.projRightRangeMinLabel.show()
self.projRightRangeMin.show()
else:
self.projTopRangeMaxLabel.hide()
self.projTopRangeMax.hide()
self.projTopRangeMinLabel.hide()
self.projTopRangeMin.hide()
self.projRightRangeMaxLabel.hide()
self.projRightRangeMax.hide()
self.projRightRangeMinLabel.hide()
self.projRightRangeMin.hide()
self.changeRanges()
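    # Handle a change of the displayed dimension(s). For contour plots the
    # projection ranges and traces are carried over to the swapped axes, the
    # diagonal multiplier is inverted, and the axis-unit drop-downs of the
    # bottom frame are exchanged so the units follow the axes.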
def setAxes(self, first=True):
axes = self.buttons1Group.checkedId()
if self.plotIs2D:
axes2 = self.buttons2Group.checkedId()
if axes == axes2:
if first:
axes2 = self.father.current.axes[-1]
else:
axes = self.father.current.axes[-2]
if isinstance(self.father.current, (views.CurrentContour)): # If contour
# Correct proj values and maxima
newRanges = [self.projRightRangeMax.value(), self.projRightRangeMin.value(), self.projTopRangeMax.value(), self.projTopRangeMin.value()]
self.father.current.setProjLimits(self.rangeCheckbox.isChecked(), newRanges)
self.father.current.setProjTraces(self.projTraceTop.value(), 1)
self.father.current.setProjTraces(self.projTraceRight.value(), 0)
                # Flip diagonal multiplier:
inp = safeEval(self.diagonalEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.father.current.viewSettings["diagonalMult"] = 1.0 / inp
                # Make sure the bottom frame nicely inverts the axis units
time1 = self.father.bottomframe.axisDropTime.currentIndex()
time2 = self.father.bottomframe.axisDropTime2.currentIndex()
freq1 = self.father.bottomframe.axisDropFreq.currentIndex()
freq2 = self.father.bottomframe.axisDropFreq2.currentIndex()
self.father.bottomframe.axisDropTime.setCurrentIndex(time2)
self.father.bottomframe.axisDropTime2.setCurrentIndex(time1)
self.father.bottomframe.axisDropFreq.setCurrentIndex(freq2)
self.father.bottomframe.axisDropFreq2.setCurrentIndex(freq1)
                if self.father.current.spec():
                    tmp1 = freq1
                else:
                    tmp1 = time1
                if self.father.current.spec(-2):
                    tmp2 = freq2
                else:
                    tmp2 = time2
self.father.bottomframe.changeAxis(tmp2, update=False)
self.father.bottomframe.changeAxis2(tmp1, update=False)
self.buttons2Group.button(axes2).toggle()
self.getSlice(axes, True)
self.upd()
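    # Collect the slice positions from the spin boxes and push the new slice
    # (and, when a dimension button was pressed, the new axes) to the current
    # view; entryNum is either the changed spin box or the chosen dimension.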
def getSlice(self, entryNum, button=False):
axisChange = False
if button:
dimNum = entryNum
axisChange = True
elif not self.plotIs2D:
if entryNum == self.father.current.axes[-1]:
if entryNum == self.length - 1:
dimNum = self.length - 2
axisChange = True
else:
dimNum = self.length - 1
axisChange = True
else:
dimNum = self.father.current.axes[-1]
else:
dimNum = self.father.current.axes[-1]
locList = np.array(self.father.current.locList, dtype=int)
for num in range(self.length):
locList[num] = self.entries[num].value()
if not self.FITTING:
self.buttons1Group.button(dimNum).toggle()
axes = np.array([self.buttons2Group.checkedId(), dimNum], dtype=int)
else:
axes = self.father.current.axes
if self.plotIs2D:
self.father.current.setSlice(axes, locList)
else:
self.father.current.setSlice(np.array([dimNum]), locList)
if not self.FITTING:
self.father.bottomframe.upd()
if axisChange:
self.father.menuCheck()
self.upd()
def setExtraAxes(self, first=True):
for i in range(len(self.extraButtons1Group)):
axes = self.extraButtons1Group[i].checkedId()
if self.plotIs2D:
axes2 = self.extraButtons2Group[i].checkedId()
if axes == axes2:
if first:
axes2 = self.father.current.viewSettings["extraAxes"][i][-1]
else:
axes = self.father.current.viewSettings["extraAxes"][i][-2]
self.extraButtons2Group[i].button(axes2).toggle()
self.getExtraSlice(axes, i, True)
self.father.current.showFid()
def getExtraSlice(self, entryNum, entryi, button=False):
length = self.father.current.viewSettings["extraData"][entryi].ndim()
if button:
dimNum = entryNum
else:
if entryNum == self.father.current.viewSettings["extraAxes"][entryi][-1]:
if entryNum == length - 1:
dimNum = length - 2
else:
dimNum = length - 1
else:
dimNum = self.father.current.viewSettings["extraAxes"][entryi][-1]
locList = np.array(self.father.current.viewSettings["extraLoc"][entryi])
for num in range(length):
locList[num] = self.extraEntries[entryi][num].value()
self.extraButtons1Group[entryi].button(dimNum).toggle()
axes = np.array([self.extraButtons2Group[entryi].checkedId(), dimNum], dtype=int)
if self.plotIs2D:
self.father.current.setExtraSlice(entryi, axes, locList)
else:
self.father.current.setExtraSlice(entryi, np.array([dimNum]), locList)
if not button:
self.father.current.showFid()
# self.upd()
def setScale(self, scale, num):
self.father.current.setExtraScale(num, scale)
def setOffset(self, offset, num):
self.father.current.setExtraOffset(num, offset * 10**self.OOM)
def setShift(self, shift, num):
self.father.current.setExtraShift(num, shift)
def setShift2(self, shift, num):
self.father.current.setExtraShift2(num, shift)
def switchDiagonal(self, val):
self.father.current.setDiagonal(bool(val))
def setDiagonal(self):
inp = safeEval(self.diagonalEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
inp = self.father.current.viewSettings["diagonalMult"]
self.father.father.dispMsg('Invalid value for diagonal multiplier')
else:
inp = float(inp)
self.diagonalEntry.setText(str(inp))
self.father.current.setDiagonal(None, inp)
def checkChanged(self):
for i in range(len(self.father.current.viewSettings["extraData"])):
extraData = self.father.current.viewSettings["extraData"][i]
if extraData.ndim() > 1:
for j in range(len(self.extraEntries[i])):
self.extraEntries[i][j].setMaximum(extraData.data[0].shape[j] - 1)
self.upd()
self.father.current.showFid()
def setExtraColor(self, num):
color = QtWidgets.QColorDialog.getColor()
if not color.isValid():
return
self.father.current.setExtraColor(num, color.getRgbF())
self.nameLabels[num].setStyleSheet("QLabel { color: rgb" + str(self.father.current.getExtraColor(num)) + ";}")
def addMultiSpec(self, *args):
text = QtWidgets.QInputDialog.getItem(self, "Select spectrum to show", "Spectrum name:", self.father.father.workspaceNames, 0, False)
if text[1]:
self.father.current.addExtraData(self.father.father.workspaces[self.father.father.workspaceNames.index(text[0])].get_masterData(), str(text[0]))
self.upd()
def delMultiSpec(self, num):
self.father.current.delExtraData(num)
self.upd()
################################################################################
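# The BottomFrame is the row of controls beneath the plot: the Fourier
# button, the time/frequency toggle, the whole-echo checkbox, the frequency
# and sweep width entries, and the plot-type and axis-unit drop-downs.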
class BottomFrame(QtWidgets.QWidget):
def __init__(self, parent):
super(BottomFrame, self).__init__(parent)
self.father = parent
grid = QtWidgets.QGridLayout(self)
self.setLayout(grid)
fourierButton = QtWidgets.QPushButton("Fourier", parent=self)
fourierButton.setToolTip(TOOLTIPS['Fourier'])
fourierButton.clicked.connect(self.father.fourier)
grid.addWidget(fourierButton, 0, 0, 2, 1)
self.specGroup = QtWidgets.QButtonGroup(self)
self.specGroup.buttonClicked.connect(self.changeSpec)
timeButton = QtWidgets.QRadioButton('Time', parent=self)
timeButton.setToolTip(TOOLTIPS['timeButton'])
self.specGroup.addButton(timeButton, 0)
grid.addWidget(timeButton, 0, 1)
freqButton = QtWidgets.QRadioButton('Frequency', parent=self)
freqButton.setToolTip(TOOLTIPS['freqButton'])
self.specGroup.addButton(freqButton, 1)
grid.addWidget(freqButton, 1, 1)
self.wholeEcho = QtWidgets.QCheckBox("Whole echo", parent=self)
self.wholeEcho.setToolTip(TOOLTIPS['wholeEcho'])
self.wholeEcho.clicked.connect(self.setWholeEcho)
grid.addWidget(self.wholeEcho, 0, 2, 2, 1)
grid.addWidget(wc.QLabel("Freq [MHz]:", self), 0, 3)
self.freqEntry = wc.QLineEdit('', self.changeFreq, parent=self)
self.freqEntry.setToolTip(TOOLTIPS['freqEntry'])
grid.addWidget(self.freqEntry, 1, 3)
grid.addWidget(wc.QLabel("Sweepwidth [kHz]:", self), 0, 4)
self.swEntry = wc.QLineEdit('', self.changeFreq, parent=self)
self.swEntry.setToolTip(TOOLTIPS['swEntry'])
grid.addWidget(self.swEntry, 1, 4)
grid.addWidget(wc.QLabel("Plot:", self), 0, 5)
self.plotDrop = QtWidgets.QComboBox(parent=self)
self.plotDrop.addItems(["Real", "Imag", "Both", "Abs"])
self.plotDrop.setToolTip(TOOLTIPS['plotDrop'])
self.plotDrop.activated.connect(self.changePlot)
grid.addWidget(self.plotDrop, 1, 5)
grid.addWidget(wc.QLabel("Axis:", self), 0, 6)
self.axisDropTime = QtWidgets.QComboBox(parent=self)
self.axisDropTime.setToolTip(TOOLTIPS['axisDrop'])
self.axisDropTime.addItems(["s", "ms", u"μs"])
self.axisDropTime.activated.connect(self.changeAxis)
grid.addWidget(self.axisDropTime, 1, 6)
self.axisDropFreq = QtWidgets.QComboBox(parent=self)
self.axisDropFreq.addItems(["Hz", "kHz", "MHz", "ppm"])
self.axisDropFreq.setToolTip(TOOLTIPS['axisDrop'])
self.axisDropFreq.activated.connect(self.changeAxis)
grid.addWidget(self.axisDropFreq, 1, 6)
self.ax2Label = wc.QLabel("Axis2:", self)
grid.addWidget(self.ax2Label, 0, 7)
self.axisDropTime2 = QtWidgets.QComboBox(parent=self)
self.axisDropTime2.addItems(["s", "ms", u"μs"])
self.axisDropTime2.setToolTip(TOOLTIPS['axis2Drop'])
self.axisDropTime2.activated.connect(self.changeAxis2)
grid.addWidget(self.axisDropTime2, 1, 7)
self.axisDropFreq2 = QtWidgets.QComboBox(parent=self)
self.axisDropFreq2.setToolTip(TOOLTIPS['axis2Drop'])
self.axisDropFreq2.addItems(["Hz", "kHz", "MHz", "ppm"])
self.axisDropFreq2.activated.connect(self.changeAxis2)
grid.addWidget(self.axisDropFreq2, 1, 7)
grid.setColumnStretch(10, 1)
grid.setAlignment(QtCore.Qt.AlignLeft)
self.grid = grid
self.upd()
def kill(self):
for i in reversed(range(self.grid.count())):
self.grid.itemAt(i).widget().deleteLater()
self.grid.deleteLater()
def frameEnable(self, enable=True):
self.setEnabled(enable)
def upd(self):
self.freqEntry.setText('%.6f' % (self.father.current.freq() / 1000000.0))
self.swEntry.setText('%.6f' % (self.father.current.sw() / 1000.0))
self.axisDropTime2.hide()
self.axisDropFreq2.hide()
self.axisDropFreq.model().item(3).setEnabled(True)
if self.father.current.spec() == 0:
self.specGroup.button(0).toggle()
self.axisDropFreq.hide()
self.axisDropTime.show()
self.ax2Label.hide()
self.axisDropTime.setCurrentIndex(self.father.current.getAxType())
elif self.father.current.spec() == 1:
self.specGroup.button(1).toggle()
self.axisDropTime.hide()
self.axisDropFreq.show()
if self.father.current.freq() == 0.0 or self.father.current.ref() == 0.0:
self.axisDropFreq.model().item(3).setEnabled(False)
self.ax2Label.hide()
if self.father.current.getppm() and self.father.current.freq() != 0.0 and self.father.current.ref() != 0.0:
self.axisDropFreq.setCurrentIndex(3)
else:
self.axisDropFreq.setCurrentIndex(self.father.current.getAxType())
if isinstance(self.father.current, views.CurrentContour):
self.ax2Label.show()
self.axisDropFreq2.model().item(3).setEnabled(True)
if self.father.current.spec(-2) == 0:
self.axisDropTime2.show()
self.axisDropTime2.setCurrentIndex(self.father.current.getAxType(-2))
elif self.father.current.spec(-2) == 1:
self.axisDropFreq2.show()
if self.father.current.freq(-2) == 0.0 or self.father.current.ref(-2) == 0.0:
self.axisDropFreq2.model().item(3).setEnabled(False)
if self.father.current.getppm(-2) and self.father.current.freq(-2) != 0.0 and self.father.current.ref(-2) != 0.0:
self.axisDropFreq2.setCurrentIndex(3)
else:
self.axisDropFreq2.setCurrentIndex(self.father.current.getAxType(-2))
if type(self.father.current) is views.CurrentArrayed:
self.ax2Label.show()
self.axisDropFreq2.model().item(3).setEnabled(True)
if self.father.current.spec(-2) == 0:
self.axisDropTime2.show()
self.axisDropTime2.setCurrentIndex(self.father.current.getAxType(-2))
elif self.father.current.spec(-2) == 1:
self.axisDropFreq2.show()
if self.father.current.freq(-2) == 0.0:
self.axisDropFreq2.model().item(3).setEnabled(False)
if self.father.current.getppm(-2) and self.father.current.freq(-2) != 0.0 and self.father.current.ref(-2) != 0.0:
self.axisDropFreq2.setCurrentIndex(3)
else:
self.axisDropFreq2.setCurrentIndex(self.father.current.getAxType(-2))
if self.father.current.wholeEcho():
self.wholeEcho.setCheckState(QtCore.Qt.Checked)
else:
self.wholeEcho.setCheckState(QtCore.Qt.Unchecked)
def setWholeEcho(self, inp):
self.father.current.setWholeEcho(inp)
self.father.menuCheck()
def changeSpec(self):
self.father.current.setSpec(self.specGroup.checkedId())
self.upd()
self.father.menuCheck()
def changeFreq(self):
freq = safeEval(self.freqEntry.text(), length=self.father.current.len(), Type='FI')
sw = safeEval(self.swEntry.text(), length=self.father.current.len(), Type='FI')
if sw is None:
self.father.father.dispMsg('Invalid sweepwidth')
elif sw == 0.0:
sw = None
self.father.father.dispMsg('Sweepwidth cannot be 0')
else:
sw *= 1000
if freq is not None:
freq *= 1e6
else:
self.father.father.dispMsg('Invalid spectrum frequency')
self.father.setFreq(freq, sw)
self.upd()
def changePlot(self, pType):
self.father.current.viewSettings["plotType"] = pType
self.father.current.showFid()
def changeAxis(self, pType, update=True):
self.father.current.setAxType(pType, update)
def changeAxis2(self, pType, update=True):
self.father.current.setAxType(pType, update, -2)
##################################################################
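# The TextFrame is the readout bar at the bottom of the window: after
# 'Get Position' the picked x/y positions, values, amplitude and the
# differences with the previously picked point are shown here.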
class TextFrame(QtWidgets.QScrollArea):
def __init__(self, parent):
super(TextFrame, self).__init__(parent)
self.father = parent
self.oldx = 0.0
self.oldy = 0.0
self.oldamp = 0.0
widthScale = 0.6
content = QtWidgets.QWidget()
grid = QtWidgets.QGridLayout(content)
getButton = QtWidgets.QPushButton("&Get Position")
getButton.setToolTip(TOOLTIPS['GetPos'])
getButton.clicked.connect(self.getPosition)
grid.addWidget(getButton, 0, 1)
grid.addWidget(wc.QLabel("x-Position:"), 0, 2)
self.xpos = wc.QLineEdit("0")
self.xpos.setReadOnly(True)
self.xpos.setToolTip(TOOLTIPS['xPosition'])
self.xpos.setFixedWidth(int(self.xpos.sizeHint().width() * widthScale))
grid.addWidget(self.xpos, 0, 3)
self.yposlabel = wc.QLabel("y-Position:")
grid.addWidget(self.yposlabel, 0, 4)
self.ypos = wc.QLineEdit("0")
self.ypos.setToolTip(TOOLTIPS['yPosition'])
self.ypos.setReadOnly(True)
self.ypos.setFixedWidth(int(self.ypos.sizeHint().width() * widthScale))
grid.addWidget(self.ypos, 0, 5)
grid.addWidget(wc.QLabel("x-Value:"), 0, 6)
self.xpoint = wc.QLineEdit("0.0")
self.xpoint.setToolTip(TOOLTIPS['xValue'])
self.xpoint.setReadOnly(True)
self.xpoint.setFixedWidth(int(self.xpoint.sizeHint().width() * widthScale))
grid.addWidget(self.xpoint, 0, 7)
self.ylabel = wc.QLabel("y-Value:")
grid.addWidget(self.ylabel, 0, 8)
self.ypoint = wc.QLineEdit("0.0")
self.ypoint.setToolTip(TOOLTIPS['yValue'])
self.ypoint.setReadOnly(True)
self.ypoint.setFixedWidth(int(self.ypoint.sizeHint().width() * widthScale))
grid.addWidget(self.ypoint, 0, 9)
grid.addWidget(wc.QLabel("Amp:"), 0, 10)
self.amppoint = wc.QLineEdit("0.0")
self.amppoint.setToolTip(TOOLTIPS['ampValue'])
self.amppoint.setReadOnly(True)
self.amppoint.setFixedWidth(int(self.amppoint.sizeHint().width() * widthScale))
grid.addWidget(self.amppoint, 0, 11)
grid.addWidget(wc.QLabel(u"Δx:"), 0, 12)
self.deltaxpoint = wc.QLineEdit("0.0")
self.deltaxpoint.setToolTip(TOOLTIPS['deltaxvalue'])
self.deltaxpoint.setReadOnly(True)
self.deltaxpoint.setFixedWidth(int(self.deltaxpoint.sizeHint().width() * widthScale))
grid.addWidget(self.deltaxpoint, 0, 13)
self.deltaylabel = wc.QLabel(u"Δy:")
grid.addWidget(self.deltaylabel, 0, 14)
self.deltaypoint = wc.QLineEdit("0.0")
self.deltaypoint.setToolTip(TOOLTIPS['deltayvalue'])
self.deltaypoint.setReadOnly(True)
self.deltaypoint.setFixedWidth(int(self.deltaypoint.sizeHint().width() * widthScale))
grid.addWidget(self.deltaypoint, 0, 15)
grid.addWidget(wc.QLabel(u"Δamp:"), 0, 16)
self.deltaamppoint = wc.QLineEdit("0.0")
self.deltaamppoint.setToolTip(TOOLTIPS['deltaamplitude'])
self.deltaamppoint.setReadOnly(True)
self.deltaamppoint.setFixedWidth(int(self.deltaamppoint.sizeHint().width() * widthScale))
grid.addWidget(self.deltaamppoint, 0, 17)
grid.setColumnStretch(20, 1)
self.grid = grid
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setWidget(content)
self.setMaximumHeight(self.grid.sizeHint().height() + self.horizontalScrollBar().sizeHint().height())
self.upd()
def upd(self):
if isinstance(self.father.current, views.CurrentContour):
self.ypos.show()
self.yposlabel.show()
self.ypoint.show()
self.deltaypoint.show()
self.ylabel.show()
self.deltaylabel.show()
else:
self.ypos.hide()
self.yposlabel.hide()
self.ypoint.hide()
self.deltaypoint.hide()
self.ylabel.hide()
self.deltaylabel.hide()
def kill(self):
for i in reversed(range(self.grid.count())):
self.grid.itemAt(i).widget().deleteLater()
self.grid.deleteLater()
def frameEnable(self, enable=True):
for child in self.children():
child.setEnabled(enable)
def setLabels(self, position):
if len(position) > 3:
self.ypos.setText(str(position[3]))
self.deltaypoint.setText('%#.4g' % np.abs(self.oldy - position[4]))
self.ypoint.setText('%#.4g' % position[4])
self.oldy = position[4]
self.deltaxpoint.setText('%#.4g' % np.abs(self.oldx - position[1]))
self.deltaamppoint.setText('%#.4g' % np.abs(self.oldamp - position[2]))
self.xpos.setText(str(position[0]))
self.xpoint.setText('%#.4g' % position[1])
self.amppoint.setText('%#.4g' % position[2])
self.oldx = position[1]
self.oldamp = position[2]
def getPosition(self, *args):
self.father.current.peakPickFunc = lambda pos, self=self: self.setLabels(pos)
if isinstance(self.father.current, views.CurrentContour):
self.father.current.peakPick = 2
else:
self.father.current.peakPick = True
#################################################################################
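# Dialog shown when loading ASCII data: it asks for the number of
# dimensions, time or frequency domain, the column order, the axis unit,
# the spectral width (when no x-axis column is present) and the delimiter.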
class AsciiLoadWindow(QtWidgets.QDialog):
dataOrders = ['XRI', 'XR', 'XI', 'RI', 'R']
delimiters = ['Tab', 'Space', 'Comma']
    timeUnits = ['s', 'ms', u'μs']
    timeMultiVals = [1.0, 1.0e-3, 1.0e-6]
    freqUnits = ['Hz', 'kHz', 'MHz', 'ppm']
    freqMultiVals = [1.0, 1.0e3, 1.0e6, None]
def __init__(self, parent, file):
super(AsciiLoadWindow, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.Tool | QtCore.Qt.WindowContextHelpButtonHint)
self.dataDimension = 1
self.dataSpec = False
self.dataOrder = 'XRI'
self.sw = 0.0
self.delim = 'Tab'
self.closed = False
self.axisMulti = 1.0
self.setWindowTitle("Load ASCII")
grid = QtWidgets.QGridLayout(self)
grid.addWidget(QtWidgets.QLabel("# Dimensions:"), 1, 0)
self.numDims = wc.SsnakeSpinBox()
self.numDims.setMinimum(1)
self.numDims.setValue(1)
self.numDims.setMaximum(2)
grid.addWidget(self.numDims, 2, 0, 1, 2)
grid.addWidget(QtWidgets.QLabel("Data Type:"), 3, 0)
self.specGroup = QtWidgets.QButtonGroup(self)
self.specGroup.buttonClicked.connect(self.checkState)
self.timeButton = QtWidgets.QRadioButton('Time', parent=self)
self.timeButton.toggle()
self.specGroup.addButton(self.timeButton, 0)
grid.addWidget(self.timeButton, 4, 0)
self.freqButton = QtWidgets.QRadioButton('Frequency', parent=self)
self.specGroup.addButton(self.freqButton, 1)
grid.addWidget(self.freqButton, 4, 1)
grid.addWidget(QtWidgets.QLabel("Data Order:"), 5, 0)
self.datOrderBox = QtWidgets.QComboBox()
self.datOrderBox.addItems(self.dataOrders)
grid.addWidget(self.datOrderBox, 6, 0, 1, 2)
self.unitLabel = wc.QLeftLabel("x-axis unit:")
grid.addWidget(self.unitLabel, 7, 0, 1, 2)
self.timeUnitBox = QtWidgets.QComboBox()
self.timeUnitBox.addItems(self.timeUnits)
grid.addWidget(self.timeUnitBox, 8, 0, 1, 2)
self.freqUnitBox = QtWidgets.QComboBox()
self.freqUnitBox.addItems(self.freqUnits)
self.freqUnitBox.currentIndexChanged.connect(self.checkState)
grid.addWidget(self.freqUnitBox, 8, 0, 1, 2)
self.freqUnitBox.hide()
self.swLabel = wc.QLeftLabel("Spectral Width [kHz]:")
grid.addWidget(self.swLabel, 9, 0, 1, 2)
self.swEntry = wc.QLineEdit("0.0")
grid.addWidget(self.swEntry, 10, 0, 1, 2)
self.swLabel.hide()
self.swEntry.hide()
self.datOrderBox.currentIndexChanged.connect(self.checkState)
self.freqLabel = wc.QLeftLabel("Spectrometer freq. [MHz]:")
grid.addWidget(self.freqLabel, 11, 0, 1, 2)
self.freqEntry = wc.QLineEdit("0.0")
grid.addWidget(self.freqEntry, 12, 0, 1, 2)
self.freqLabel.hide()
self.freqEntry.hide()
grid.addWidget(QtWidgets.QLabel("Data Delimiter:"), 13, 0)
self.datDelimBox = QtWidgets.QComboBox()
self.datDelimBox.addItems(self.delimiters)
grid.addWidget(self.datDelimBox, 14, 0, 1, 2)
cancelButton = QtWidgets.QPushButton("&Cancel")
cancelButton.clicked.connect(self.closeEvent)
okButton = QtWidgets.QPushButton("&Ok")
okButton.clicked.connect(self.applyAndClose)
box = QtWidgets.QDialogButtonBox()
box.addButton(cancelButton, QtWidgets.QDialogButtonBox.RejectRole)
box.addButton(okButton, QtWidgets.QDialogButtonBox.AcceptRole)
grid.addWidget(box, 15, 0, 1, 2)
self.show()
self.setFixedSize(self.size())
self.checkType(file)
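    # Show or hide the input fields depending on the chosen data order and
    # domain: without an x-axis column the spectral width must be given,
    # otherwise the axis unit is asked, and a ppm axis additionally needs
    # the spectrometer frequency.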
def checkState(self):
tmp = self.dataOrders[self.datOrderBox.currentIndex()]
if tmp in ('RI', 'R'):
self.swLabel.show()
self.swEntry.show()
self.unitLabel.hide()
self.timeUnitBox.hide()
self.freqUnitBox.hide()
self.freqLabel.hide()
self.freqEntry.hide()
else:
self.swLabel.hide()
self.swEntry.hide()
self.unitLabel.show()
if self.timeButton.isChecked():
self.timeUnitBox.show()
self.freqUnitBox.hide()
self.freqLabel.hide()
self.freqEntry.hide()
else:
self.timeUnitBox.hide()
self.freqUnitBox.show()
self.freqLabel.hide()
self.freqEntry.hide()
if self.freqUnitBox.currentIndex() == 3:
self.freqLabel.show()
self.freqEntry.show()
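    # Inspect the first line of the file to guess the delimiter (comma, tab
    # or space) and switch to two dimensions when more than three columns
    # are found; zipped files are skipped.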
def checkType(self, file):
if file.endswith('.zip'):
return # cannot read zipped files from here
try:
with open(file, 'r') as f:
line = f.readline()
if line.count(',') > 0:
sep = 'Comma'
elif line.count('\t') > 0:
sep = 'Tab'
else:
sep = 'Space'
self.datDelimBox.setCurrentIndex(self.delimiters.index(sep))
sepList = ['\t', ' ', ',']
data = np.fromstring(line, sep=sepList[self.delimiters.index(sep)])
if len(data) > 3:
self.numDims.setValue(2)
except Exception:
return
def closeEvent(self, *args):
self.closed = True
self.accept()
self.deleteLater()
def applyAndClose(self):
self.dataOrder = self.dataOrders[self.datOrderBox.currentIndex()]
self.delim = self.delimiters[self.datDelimBox.currentIndex()]
if self.dataOrder == 'RI' or self.dataOrder == 'R':
self.sw = safeEval(self.swEntry.text(), Type='FI')
if self.sw == 0 or self.sw is None:
raise SsnakeException('Spectral Width input is not valid')
self.dataDimension = self.numDims.value()
if self.timeButton.isChecked():
self.dataSpec = False
else:
self.dataSpec = True
        if 'X' in self.dataOrder:  # If there is an x-axis
            if not self.dataSpec:
                self.axisMulti = self.timeMultiVals[self.timeUnitBox.currentIndex()]
            else:
                if self.freqUnitBox.currentIndex() == 3:  # If ppm
                    self.axisMulti = safeEval(self.freqEntry.text())
                else:
                    self.axisMulti = self.freqMultiVals[self.freqUnitBox.currentIndex()]
self.accept()
self.deleteLater()
#################################################################################
class WorkInfoWindow(QtWidgets.QDialog):
    # A window displaying information about the workspace (dimensions, spectral widths, frequencies, metadata).
def __init__(self, parent):
super(WorkInfoWindow, self).__init__(parent)
self.father = parent
self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.Tool | QtCore.Qt.WindowContextHelpButtonHint)
self.setWindowTitle("Workspace Info")
grid = QtWidgets.QGridLayout(self)
workGroup = QtWidgets.QGroupBox('Data:')
workFrame = QtWidgets.QGridLayout()
workGroup.setLayout(workFrame)
workFrame.addWidget(QtWidgets.QLabel("Name:"), 0, 0)
workFrame.addWidget(wc.QSelectLabel(self.father.masterData.name), 0, 1)
workFrame.addWidget(QtWidgets.QLabel("# Dimensions:"), 1, 0)
workFrame.addWidget(wc.QSelectLabel(str(self.father.masterData.ndim())), 1, 1)
sw = self.father.masterData.sw
        npoints = self.father.masterData.shape()
freq = self.father.masterData.freq
ref = [x/1e6 if x is not None else x for x in self.father.masterData.ref]
whole = self.father.masterData.wholeEcho
spec = ['Time' if x == 0 else 'Frequency' for x in self.father.masterData.spec]
for x in range(self.father.masterData.ndim()):
workFrame.addWidget(wc.QSelectLabel('D' + str(x+1)), 2, x+1)
workFrame.addWidget(wc.QSelectLabel(str(sw[x]/1000)), 3, x+1)
workFrame.addWidget(wc.QSelectLabel(str(freq[x]/1e6)), 4, x+1)
workFrame.addWidget(wc.QSelectLabel(str(ref[x])), 5, x+1)
            workFrame.addWidget(wc.QSelectLabel(str(npoints[x])), 6, x+1)
workFrame.addWidget(wc.QSelectLabel(spec[x]), 7, x+1)
workFrame.addWidget(wc.QSelectLabel(str(self.father.masterData.isComplex(x))), 8, x+1)
workFrame.addWidget(wc.QSelectLabel(str(whole[x])), 9, x+1)
workFrame.addWidget(QtWidgets.QLabel('Spectral Width [kHz]:'), 3, 0)
workFrame.addWidget(QtWidgets.QLabel('Frequency [MHz]:'), 4, 0)
workFrame.addWidget(QtWidgets.QLabel('Reference [MHz]:'), 5, 0)
workFrame.addWidget(QtWidgets.QLabel('Number of Points:'), 6, 0)
workFrame.addWidget(QtWidgets.QLabel('Type:'), 7, 0)
workFrame.addWidget(QtWidgets.QLabel('Complex:'), 8, 0)
workFrame.addWidget(QtWidgets.QLabel('Whole Echo:'), 9, 0)
grid.addWidget(workGroup, 0, 0, 1, 3)
metaGroup = QtWidgets.QGroupBox('Metadata:')
metaFrame = QtWidgets.QGridLayout()
metaGroup.setLayout(metaFrame)
for pos, key in enumerate(self.father.masterData.metaData):
metaFrame.addWidget(QtWidgets.QLabel(key), pos, 0)
metaFrame.addWidget(wc.QSelectLabel(self.father.masterData.metaData[key]), pos, 1)
grid.addWidget(metaGroup, 1, 0, 1, 3)
okButton = QtWidgets.QPushButton("&Close")
okButton.clicked.connect(self.closeEvent)
grid.addWidget(okButton, 2, 1)
self.show()
self.setFixedSize(self.size())
def closeEvent(self, *args):
self.closed = True
self.accept()
self.deleteLater()
#################################################################################
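# Tool window for interactive phase correction: zero- and first-order
# phases can be set via sliders, step buttons, text entries or autophasing,
# with a pivot point for the first-order correction.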
class PhaseWindow(wc.ToolWindow):
NAME = "Phasing"
SINGLESLICE = True
RESOLUTION = 1000
P1LIMIT = 540.0
PHASE0STEP = 1.0
PHASE1STEP = 1.0
def __init__(self, parent):
super(PhaseWindow, self).__init__(parent)
self.zeroVal = 0.0
self.firstVal = 0.0
self.pivotVal = 0.0
self.available = True
# Zero order
self.zeroOrderGroup = QtWidgets.QGroupBox('Zero order:')
self.zeroOrderFrame = QtWidgets.QGridLayout()
autoZero = QtWidgets.QPushButton("Autophase 0th")
autoZero.clicked.connect(lambda: self.autophase(0))
self.zeroOrderFrame.addWidget(autoZero, 0, 1)
self.zeroEntry = wc.QLineEdit("0.000", self.inputZeroOrder)
self.zeroOrderFrame.addWidget(self.zeroEntry, 2, 1)
self.leftZero = QtWidgets.QPushButton("<")
self.leftZero.clicked.connect(lambda: self.stepPhase(-1, 0))
self.leftZero.setAutoRepeat(True)
self.zeroOrderFrame.addWidget(self.leftZero, 2, 0)
self.rightZero = QtWidgets.QPushButton(">")
self.rightZero.clicked.connect(lambda: self.stepPhase(1, 0))
self.rightZero.setAutoRepeat(True)
self.zeroOrderFrame.addWidget(self.rightZero, 2, 2)
self.zeroScale = wc.SsnakeSlider(QtCore.Qt.Horizontal)
self.zeroScale.setRange(-self.RESOLUTION, self.RESOLUTION)
self.zeroScale.valueChanged.connect(self.setZeroOrder)
self.zeroOrderFrame.addWidget(self.zeroScale, 3, 0, 1, 3)
self.zeroOrderGroup.setLayout(self.zeroOrderFrame)
self.grid.addWidget(self.zeroOrderGroup, 0, 0, 1, 3)
# First order
self.firstOrderGroup = QtWidgets.QGroupBox('First order:')
self.firstOrderFrame = QtWidgets.QGridLayout()
autoFirst = QtWidgets.QPushButton("Autophase 0th+1st")
autoFirst.clicked.connect(lambda: self.autophase(1))
self.firstOrderFrame.addWidget(autoFirst, 5, 1)
self.firstEntry = wc.QLineEdit("0.000", self.inputFirstOrder)
self.firstOrderFrame.addWidget(self.firstEntry, 6, 1)
self.leftFirst = QtWidgets.QPushButton("<")
self.leftFirst.clicked.connect(lambda: self.stepPhase(0, -1))
self.leftFirst.setAutoRepeat(True)
self.firstOrderFrame.addWidget(self.leftFirst, 6, 0)
self.rightFirst = QtWidgets.QPushButton(">")
self.rightFirst.clicked.connect(lambda: self.stepPhase(0, 1))
self.rightFirst.setAutoRepeat(True)
self.firstOrderFrame.addWidget(self.rightFirst, 6, 2)
self.firstScale = wc.SsnakeSlider(QtCore.Qt.Horizontal)
self.firstScale.setRange(-self.RESOLUTION, self.RESOLUTION)
self.firstScale.valueChanged.connect(self.setFirstOrder)
self.firstOrderFrame.addWidget(self.firstScale, 7, 0, 1, 3)
if self.father.current.spec() > 0:
self.firstOrderFrame.addWidget(wc.QLabel("Pivot point [Hz]:"), 8, 0, 1, 3)
pickRef = QtWidgets.QPushButton("Pick pivot")
pickRef.clicked.connect(self.pickRef)
self.firstOrderFrame.addWidget(pickRef, 9, 1)
self.refEntry = wc.QLineEdit(('%.3f' % self.pivotVal), self.inputRef)
self.firstOrderFrame.addWidget(self.refEntry, 10, 1)
self.firstOrderGroup.setLayout(self.firstOrderFrame)
self.grid.addWidget(self.firstOrderGroup, 1, 0, 1, 3)
def setModifierTexts(self, event):
sign = u"\u00D7"
if event.modifiers() & QtCore.Qt.AltModifier:
sign = '/'
left = [self.leftZero, self.leftFirst]
right = [self.rightZero, self.rightFirst]
if event.modifiers() & QtCore.Qt.ControlModifier and event.modifiers() & QtCore.Qt.ShiftModifier:
text = ' ' + sign + '1000'
elif event.modifiers() & QtCore.Qt.ControlModifier:
text = ' ' + sign + '10'
elif event.modifiers() & QtCore.Qt.ShiftModifier:
text = ' ' + sign + '100'
else:
text = ''
for widget in left:
widget.setText('<' + text)
for widget in right:
widget.setText('>' + text)
def keyPressEvent(self, event):
self.setModifierTexts(event)
def keyReleaseEvent(self, event):
self.setModifierTexts(event)
def setZeroOrder(self, value, *args):
if self.available:
self.zeroVal = float(value) / self.RESOLUTION * 180
self.zeroEntry.setText('%.3f' % self.zeroVal)
self.father.current.setPhaseInter(np.pi * self.zeroVal / 180.0, np.pi * self.firstVal / 180.0)
def inputZeroOrder(self, *args):
inp = safeEval(self.zeroEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException('Phasing: zero order value input is not valid!')
self.zeroVal = np.mod(inp + 180, 360) - 180
self.zeroEntry.setText('%.3f' % self.zeroVal)
self.available = False
self.zeroScale.setValue(int(round(self.zeroVal / 180.0 * self.RESOLUTION)))
self.available = True
self.father.current.setPhaseInter(np.pi * self.zeroVal / 180.0, np.pi * self.firstVal / 180.0)
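    # Changing the first-order phase also adjusts the zero-order phase so
    # that the total phase at the pivot position remains unchanged
    # (newZero = zero - (p1_new - p1_old) * pivot / sw).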
def setFirstOrder(self, value, *args):
if self.available:
value = float(value) / self.RESOLUTION * self.P1LIMIT
newZero = (self.zeroVal - (value - self.firstVal) * self.pivotVal / self.father.current.sw())
self.zeroVal = np.mod(newZero + 180, 360) - 180
self.zeroEntry.setText('%.3f' % self.zeroVal)
self.firstVal = value
self.firstEntry.setText('%.3f' % self.firstVal)
self.available = False
self.zeroScale.setValue(int(round(self.zeroVal / 180.0 * self.RESOLUTION)))
self.available = True
self.father.current.setPhaseInter(np.pi * self.zeroVal / 180.0, np.pi * self.firstVal / 180.0)
def inputFirstOrder(self, *args):
value = safeEval(self.firstEntry.text(), length=self.father.current.len(), Type='FI')
if value is None:
raise SsnakeException('Phasing: first order value input is not valid!')
newZero = (self.zeroVal - (value - self.firstVal) * self.pivotVal / self.father.current.sw())
self.zeroVal = np.mod(newZero + 180, 360) - 180
self.zeroEntry.setText('%.3f' % self.zeroVal)
self.firstVal = value
self.firstEntry.setText('%.3f' % self.firstVal)
self.available = False
self.zeroScale.setValue(int(round(self.zeroVal / 180.0 * self.RESOLUTION)))
self.firstScale.setValue(int(round(self.firstVal / self.P1LIMIT * self.RESOLUTION)))
self.available = True
self.father.current.setPhaseInter(np.pi * self.zeroVal / 180.0, np.pi * self.firstVal / 180.0)
def autophase(self, num):
phases = self.father.current.autoPhase(num)
val = phases[0] / np.pi * 180.0
self.zeroVal = (np.mod(val + 180, 360) - 180)
self.zeroEntry.setText('%.3f' % self.zeroVal)
self.available = False
self.zeroScale.setValue(round(self.zeroVal / 180.0 * self.RESOLUTION))
self.available = True
if num == 1:
val = phases[1] / np.pi * 180.0
self.firstVal = val
self.firstEntry.setText('%.3f' % self.firstVal)
self.inputFirstOrder()
def stepPhase(self, phase0, phase1):
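        # Step the phases by one unit scaled by the keyboard modifiers:
        # Ctrl = x10, Shift = x100, Ctrl+Shift = x1000, Alt divides instead of multiplies.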
step = 1
multiplier = 1
if QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier and QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
multiplier *= 1000
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier:
multiplier *= 10
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
multiplier *= 100
if QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.AltModifier:
step = step / multiplier
else:
step = step * multiplier
phase0 = step * phase0
phase1 = step * phase1
inp = safeEval(self.zeroEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException('Phasing: zero order value input is not valid!')
inp += phase0 * self.PHASE0STEP
self.zeroVal = np.mod(inp + 180, 360) - 180
value = safeEval(self.firstEntry.text(), length=self.father.current.len(), Type='FI')
if value is None:
raise SsnakeException('Phasing: first order value input is not valid!')
value += phase1 * self.PHASE1STEP
if self.father.current.spec() > 0:
self.inputRef()
newZero = (self.zeroVal - (value - self.firstVal) * self.pivotVal / self.father.current.sw())
self.zeroVal = np.mod(newZero + 180, 360) - 180
self.zeroEntry.setText('%.3f' % self.zeroVal)
self.firstVal = value
self.firstEntry.setText('%.3f' % self.firstVal)
self.available = False
self.zeroScale.setValue(round(self.zeroVal / 180.0 * self.RESOLUTION))
self.firstScale.setValue(round(self.firstVal / self.P1LIMIT * self.RESOLUTION))
self.available = True
self.father.current.setPhaseInter(np.pi * self.zeroVal / 180.0, np.pi * self.firstVal / 180.0)
def inputRef(self, *args):
Val = safeEval(self.refEntry.text(), length=self.father.current.len(), Type='FI')
if Val is None:
raise SsnakeException('Phasing: pivot input is not valid!')
self.pivotVal = Val
self.refEntry.setText('%.3f' % self.pivotVal)
def setRef(self, value, *args):
self.pivotVal = float(value)
self.refEntry.setText('%.3f' % self.pivotVal)
def pickRef(self, *args):
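        # Let the user pick a position in the spectrum to use as the first order pivot point.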
self.father.current.peakPickFunc = lambda pos, self=self: self.setRef(self.father.current.xax()[pos[0]])
self.father.current.peakPick = True
def applyFunc(self):
if self.father.current.spec() > 0:
self.inputRef()
self.inputZeroOrder()
self.inputFirstOrder()
self.father.current.applyPhase(np.pi * self.zeroVal / 180.0, np.pi * self.firstVal / 180.0, (self.singleSlice.isChecked() == 1))
################################################################
class ApodWindow(wc.ToolWindow):
RESOLUTION = 10000
NAME = "Apodize"
SINGLESLICE = True
def __init__(self, parent):
super(ApodWindow, self).__init__(parent)
self.entries = {}
self.ticks = {}
boldFont = QtGui.QFont()
boldFont.setBold(True)
self.maximum = 100.0 * self.father.current.sw() / (self.father.current.len())
self.lbstep = 1.0
self.available = True
self.lorGroup = QtWidgets.QGroupBox()
self.lorFrame = QtWidgets.QGridLayout()
lorTick = QtWidgets.QCheckBox("Lorentzian [Hz]:")
lorTick.setFont(boldFont)
lorTick.toggled.connect(lambda x: self.checkEval('lor'))
self.lorFrame.addWidget(lorTick, 0, 0, 1, 3)
self.ticks['lor'] = lorTick
lorEntry = wc.QLineEdit("0.00", self.apodPreview)
lorEntry.setMinimumWidth(150)
lorEntry.setEnabled(False)
self.lorFrame.addWidget(lorEntry, 1, 1)
self.entries['lor'] = [lorEntry]
self.leftLor = QtWidgets.QPushButton("<")
self.leftLor.clicked.connect(lambda: self.stepLB(-0.5 * self.father.current.sw() / (self.father.current.len()), 'lor'))
self.leftLor.setAutoRepeat(True)
self.lorFrame.addWidget(self.leftLor, 1, 0)
self.rightLor = QtWidgets.QPushButton(">")
self.rightLor.clicked.connect(lambda: self.stepLB(0.5 * self.father.current.sw() / (self.father.current.len()), 'lor'))
self.rightLor.setAutoRepeat(True)
self.lorFrame.addWidget(self.rightLor, 1, 2)
self.lorScale = wc.SsnakeSlider(QtCore.Qt.Horizontal)
self.lorScale.setRange(0, self.RESOLUTION)
self.lorScale.valueChanged.connect(lambda x: self.setLorGauss(x, 'lor'))
self.lorFrame.addWidget(self.lorScale, 2, 0, 1, 3)
self.lorMax = 100.0 * self.father.current.sw() / (self.father.current.len())
self.lorGroup.setLayout(self.lorFrame)
self.grid.addWidget(self.lorGroup, 0, 0, 1, 3)
self.gaussGroup = QtWidgets.QGroupBox()
self.gaussFrame = QtWidgets.QGridLayout()
gaussTick = QtWidgets.QCheckBox("Gaussian [Hz]:")
gaussTick.setFont(boldFont)
gaussTick.toggled.connect(lambda: self.checkEval('gauss'))
self.gaussFrame.addWidget(gaussTick, 3, 0, 1, 3)
self.ticks['gauss'] = gaussTick
gaussEntry = wc.QLineEdit("0.00", self.apodPreview)
gaussEntry.setEnabled(False)
gaussEntry.setMinimumWidth(150)
self.gaussFrame.addWidget(gaussEntry, 4, 1)
self.entries['gauss'] = [gaussEntry]
self.leftGauss = QtWidgets.QPushButton("<")
self.leftGauss.clicked.connect(lambda: self.stepLB(-0.5 * self.father.current.sw() / (self.father.current.len()), 'gauss'))
self.leftGauss.setAutoRepeat(True)
self.gaussFrame.addWidget(self.leftGauss, 4, 0)
self.rightGauss = QtWidgets.QPushButton(">")
self.rightGauss.clicked.connect(lambda: self.stepLB(0.5 * self.father.current.sw() / (self.father.current.len()), 'gauss'))
self.rightGauss.setAutoRepeat(True)
self.gaussFrame.addWidget(self.rightGauss, 4, 2)
self.gaussScale = wc.SsnakeSlider(QtCore.Qt.Horizontal)
self.gaussScale.setRange(0, self.RESOLUTION)
self.gaussScale.valueChanged.connect(lambda x: self.setLorGauss(x, 'gauss'))
self.gaussFrame.addWidget(self.gaussScale, 5, 0, 1, 3)
self.gaussMax = 100.0 * self.father.current.sw() / (self.father.current.len())
self.gaussGroup.setLayout(self.gaussFrame)
self.grid.addWidget(self.gaussGroup, 1, 0, 1, 3)
self.cos2Group = QtWidgets.QGroupBox()
self.cos2Frame = QtWidgets.QGridLayout()
cos2Tick = QtWidgets.QCheckBox("Cos^2:")
cos2Tick.setFont(boldFont)
cos2Tick.clicked.connect(lambda: self.checkEval('cos2'))
self.cos2Frame.addWidget(cos2Tick, 0, 0, 1, 2)
self.ticks['cos2'] = cos2Tick
cos2Entry = wc.QLineEdit("1", self.apodPreview)
cos2Entry.setEnabled(False)
widthHint = cos2Entry.minimumSizeHint()
        widthHint.setWidth(widthHint.width() * 4)
cos2Entry.setMinimumSize(widthHint)
self.cos2Frame.addWidget(cos2Entry, 1, 2)
self.entries['cos2'] = [cos2Entry]
cos2Label = wc.QLeftLabel("Frequency:")
cos2Label.setEnabled(False)
self.entries['cos2'].append(cos2Label)
self.cos2Frame.addWidget(cos2Label, 1, 0)
cos2DegEntry = wc.QLineEdit("0", self.apodPreview)
cos2DegEntry.setEnabled(False)
cos2DegEntry.setMinimumSize(widthHint)
self.cos2Frame.addWidget(cos2DegEntry, 2, 2)
self.entries['cos2'].append(cos2DegEntry)
cos2PhLabel = wc.QLeftLabel("Phase [deg]:")
cos2PhLabel.setEnabled(False)
self.entries['cos2'].append(cos2PhLabel)
self.cos2Frame.addWidget(QtWidgets.QWidget(), 1, 1)
self.cos2Frame.setColumnStretch(1, 1)
self.cos2Frame.addWidget(cos2PhLabel, 2, 0)
self.cos2Group.setLayout(self.cos2Frame)
self.grid.addWidget(self.cos2Group, 2, 0, 1, 3)
self.hammingGroup = QtWidgets.QGroupBox()
self.hammingFrame = QtWidgets.QGridLayout()
hammingTick = QtWidgets.QCheckBox("Hamming:")
hammingTick.setFont(boldFont)
hammingTick.clicked.connect(lambda: self.checkEval('hamming'))
self.hammingFrame.addWidget(hammingTick, 0, 0, 1, 2)
self.ticks['hamming'] = hammingTick
hammingLabel = wc.QLeftLabel("Frequency:\t")
hammingLabel.setEnabled(False)
self.entries['hamming'] = [hammingLabel]
self.hammingFrame.addWidget(hammingLabel, 1, 0)
hammingEntry = wc.QLineEdit("1", self.apodPreview)
hammingEntry.setEnabled(False)
hammingEntry.setMinimumSize(widthHint)
self.hammingFrame.addWidget(hammingEntry, 1, 2)
self.entries['hamming'].append(hammingEntry)
self.hammingFrame.addWidget(QtWidgets.QWidget(), 1, 1)
self.hammingFrame.setColumnStretch(1, 1)
self.hammingGroup.setLayout(self.hammingFrame)
self.grid.addWidget(self.hammingGroup, 3, 0, 1, 3)
self.shiftGroup = QtWidgets.QGroupBox()
self.shiftFrame = QtWidgets.QGridLayout()
shiftTick = QtWidgets.QCheckBox("Shift:")
shiftTick.setFont(boldFont)
shiftTick.clicked.connect(lambda: self.checkEval('shift'))
self.ticks['shift'] = shiftTick
self.shiftFrame.addWidget(shiftTick, 0, 0)
shiftLabel = wc.QLeftLabel("Value [s]:")
shiftLabel.setEnabled(False)
self.shiftFrame.addWidget(shiftLabel, 1, 0)
shiftEntry = wc.QLineEdit("0", self.apodPreview)
self.shiftFrame.addWidget(QtWidgets.QWidget(), 1, 1)
shiftEntry.setMinimumSize(widthHint)
shiftEntry.setEnabled(False)
self.entries['shift'] = [shiftEntry, shiftLabel]
self.shiftFrame.addWidget(shiftEntry, 1, 2)
self.shiftGroup.setLayout(self.shiftFrame)
self.shiftFrame.setColumnStretch(1, 1)
self.grid.addWidget(self.shiftGroup, 4, 0, 1, 3)
if self.father.current.data.ndim() > 1:
self.shiftingGroup = QtWidgets.QGroupBox()
self.shiftingFrame = QtWidgets.QGridLayout()
shiftingTick = QtWidgets.QCheckBox("Shifting:")
shiftingTick.clicked.connect(lambda: self.checkEval('shifting'))
shiftingTick.setFont(boldFont)
self.ticks['shifting'] = shiftingTick
self.shiftingFrame.addWidget(shiftingTick, 0, 0)
self.shiftingDropdown = QtWidgets.QComboBox()
self.shiftingDropdown.addItems(['User Defined', 'Spin 3/2, -3Q (7/9)', 'Spin 5/2, 3Q (19/12)',
'Spin 5/2, -5Q (25/12)', 'Spin 7/2, 3Q (101/45)', 'Spin 7/2, 5Q (11/9)',
'Spin 7/2, -7Q (161/45)', 'Spin 9/2, 3Q (91/36)', 'Spin 9/2, 5Q (95/36)',
'Spin 9/2, 7Q (7/18)', 'Spin 9/2, -9Q (31/6)'])
self.shiftingDropdown.activated.connect(self.dropdownChanged)
self.shiftingList = [0, 7.0 / 9.0, 19.0 / 12.0,
25.0 / 12.0, 101.0 / 45.0, 11.0 / 9.0,
161.0 / 45.0, 91.0 / 36.0, 95.0 / 36.0,
7.0 / 18.0, 31.0 / 6.0]
self.shiftingDropdown.setMinimumSize(widthHint)
self.shiftingDropdown.setEnabled(False)
self.shiftingFrame.addWidget(self.shiftingDropdown, 1, 2)
shiftingTypeLabel = wc.QLeftLabel("Type:")
shiftingTypeLabel.setEnabled(False)
self.shiftingFrame.addWidget(shiftingTypeLabel, 1, 0)
self.shiftingEntry = wc.QLineEdit("0.00", self.apodPreview)
self.shiftingEntry.setEnabled(False)
self.shiftingEntry.setMinimumSize(widthHint)
self.shiftingFrame.addWidget(self.shiftingEntry, 2, 2)
shiftingValueLabel = wc.QLeftLabel("Value:")
shiftingValueLabel.setEnabled(False)
self.shiftingFrame.addWidget(shiftingValueLabel, 2, 0)
self.shiftingAxis = QtWidgets.QComboBox()
self.shiftingValues = list(map(str, np.delete(range(1, self.father.current.data.ndim() + 1), self.father.current.axes[-1])))
self.shiftingAxis.addItems(self.shiftingValues)
self.shiftingAxis.currentIndexChanged.connect(self.apodPreview)
self.shiftingAxis.setMinimumSize(widthHint)
self.shiftingAxis.setEnabled(False)
self.shiftingFrame.addWidget(self.shiftingAxis, 3, 2)
shiftingAxisLabel = wc.QLeftLabel("Axis:")
shiftingAxisLabel.setEnabled(False)
self.shiftingFrame.addWidget(shiftingAxisLabel, 3, 0)
self.shiftingFrame.addWidget(QtWidgets.QWidget(), 1, 1)
self.shiftingFrame.setColumnStretch(1, 1)
self.entries['shifting'] = [self.shiftingDropdown, shiftingTypeLabel, self.shiftingEntry, shiftingValueLabel, self.shiftingAxis, shiftingAxisLabel]
self.shiftingGroup.setLayout(self.shiftingFrame)
self.grid.addWidget(self.shiftingGroup, 5, 0, 1, 3)
def setModifierTexts(self, event):
sign = u"\u00D7"
if event.modifiers() & QtCore.Qt.AltModifier:
sign = '/'
left = [self.leftLor, self.leftGauss]
right = [self.rightLor, self.rightGauss]
if event.modifiers() & QtCore.Qt.ControlModifier and event.modifiers() & QtCore.Qt.ShiftModifier:
text = ' ' + sign + '1000'
elif event.modifiers() & QtCore.Qt.ControlModifier:
text = ' ' + sign + '10'
elif event.modifiers() & QtCore.Qt.ShiftModifier:
text = ' ' + sign + '100'
else:
text = ''
for widget in left:
widget.setText('<' + text)
for widget in right:
widget.setText('>' + text)
def keyPressEvent(self, event):
self.setModifierTexts(event)
def keyReleaseEvent(self, event):
self.setModifierTexts(event)
def dropdownChanged(self, update=True):
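        # 'User Defined' enables manual input; the other entries fill in a preset shifting ratio.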
index = self.shiftingDropdown.currentIndex()
if index == 0:
self.shiftingEntry.setEnabled(True)
else:
self.shiftingEntry.setEnabled(False)
if update:
self.shiftingEntry.setText("%.9f" % self.shiftingList[index])
self.apodPreview()
def checkEval(self, key):
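        # Enable or disable the widgets belonging to this apodization parameter and refresh the
        # preview (for Lorentzian/Gaussian only when the current value is nonzero).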
if self.ticks[key].isChecked():
for elem in self.entries[key]:
elem.setEnabled(True)
else:
for elem in self.entries[key]:
elem.setEnabled(False)
if self.father.current.data.ndim() > 1:
if self.ticks['shifting'].isChecked():
self.dropdownChanged(update=False) #Check dropdown state
if key in ('lor', 'gauss'): # for lorentzian and gaussian
if safeEval(self.entries[key][0].text(), length=self.father.current.len(), Type='FI') != 0.0: # only update if value was not zero
self.apodPreview()
else:
self.apodPreview()
    def setLorGauss(self, value, type, *args):
        # 'type' selects the parameter to update: 'lor' or 'gauss'
if self.available:
self.entries[type][0].setText('%.4g' % (float(value) * self.maximum / self.RESOLUTION))
if not self.ticks[type].isChecked():
self.ticks[type].setChecked(1)
self.apodPreview()
def apodPreview(self, *args):
self.available = False #turn off gauss/lorentz callbacks
lor, gauss, cos2, cos2Ph, hamming, shift, shifting, shiftingAxis = self.checkInput()
self.available = True
self.father.current.apodPreview(lor, gauss, [cos2, cos2Ph], hamming, shift, shifting, shiftingAxis)
def stepLB(self, incr, type):
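        # Step the Lorentzian or Gaussian broadening by incr, scaled by the keyboard modifiers
        # (Ctrl = x10, Shift = x100, Ctrl+Shift = x1000, Alt divides), and update the preview.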
step = incr * self.lbstep
multiplier = 1
if QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier and QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
multiplier = 1000
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier:
multiplier = 10
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
multiplier = 100
if QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.AltModifier:
step = step / multiplier
else:
step = step * multiplier
if not self.ticks[type].isChecked():
self.ticks[type].setChecked(1)
lor, gauss, cos2, cos2Ph, hamming, shift, shifting, shiftingAxis = self.checkInput()
if type == 'lor':
self.entries[type][0].setText('%.4g' % (lor + step))
elif type == 'gauss':
self.entries[type][0].setText('%.4g' % (gauss + step))
self.apodPreview()
def checkInput(self):
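        # Read and validate all enabled apodization entries, sync the Lorentzian/Gaussian
        # sliders, and return the values as a single tuple for preview or apply.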
lor = None
gauss = None
cos2 = None
cos2Ph = None
hamming = None
shift = 0.0
shifting = 0.0
shiftingAxis = None
if self.ticks['lor'].isChecked():
lor = safeEval(self.entries['lor'][0].text(), length=self.father.current.len(), Type='FI')
if lor is None:
self.father.current.showFid()
raise SsnakeException('Apodize: Lorentzian value is not valid!')
self.lorScale.setValue(int(round(lor * self.RESOLUTION / self.maximum)))
if self.ticks['gauss'].isChecked():
gauss = safeEval(self.entries['gauss'][0].text(), length=self.father.current.len(), Type='FI')
if gauss is None:
self.father.current.showFid()
raise SsnakeException('Apodize: Gaussian value is not valid!')
self.gaussScale.setValue(int(round(gauss * self.RESOLUTION / self.maximum)))
if self.ticks['cos2'].isChecked():
cos2 = safeEval(self.entries['cos2'][0].text(), length=self.father.current.len(), Type='FI')
if cos2 is None:
self.father.current.showFid()
raise SsnakeException('Apodize: cos^2 frequency value is not valid!')
if self.ticks['cos2'].isChecked():
cos2Ph = safeEval(self.entries['cos2'][2].text(), length=self.father.current.len(), Type='FI')
if cos2Ph is None:
self.father.current.showFid()
raise SsnakeException('Apodize: cos^2 phase value is not valid!')
if self.ticks['hamming'].isChecked():
hamming = safeEval(self.entries['hamming'][1].text(), length=self.father.current.len(), Type='FI')
if hamming is None:
self.father.current.showFid()
raise SsnakeException('Apodize: Hamming value is not valid!')
if self.ticks['shift'].isChecked():
shift = safeEval(self.entries['shift'][0].text(), length=self.father.current.len(), Type='FI')
if shift is None:
self.father.current.showFid()
raise SsnakeException('Apodize: Shift value is not valid!')
        if self.father.current.data.ndim() > 1 and self.ticks['shifting'].isChecked():
            shifting = safeEval(self.shiftingEntry.text(), length=self.father.current.len(), Type='FI')
            if shifting is None:
                self.father.current.showFid()
                raise SsnakeException('Apodize: Shifting value is not valid!')
            shiftingAxis = int(self.shiftingValues[self.shiftingAxis.currentIndex()]) - 1
return lor, gauss, cos2, cos2Ph, hamming, shift, shifting, shiftingAxis
def applyFunc(self):
lor, gauss, cos2, cos2Ph, hamming, shift, shifting, shiftingAxis = self.checkInput()
self.father.current.applyApod(lor, gauss, [cos2, cos2Ph], hamming, shift, shifting, shiftingAxis, (self.singleSlice.isChecked()))
#######################################################################################
class SizeWindow(wc.ToolWindow):
NAME = "Set size"
def __init__(self, parent):
super(SizeWindow, self).__init__(parent)
self.sizeGroup = QtWidgets.QGroupBox('Size:')
self.sizeFrame = QtWidgets.QGridLayout()
self.sizeVal = parent.current.len()
self.sizeEntry = wc.QLineEdit(self.sizeVal, self.sizePreview)
self.sizeEntry.setMinimumWidth(100)
self.sizeFrame.addWidget(self.sizeEntry, 0, 1)
rightPower = QtWidgets.QPushButton("+ 2^n")
rightPower.clicked.connect(lambda: self.stepSize(True))
self.sizeFrame.addWidget(rightPower, 0, 2)
leftPower = QtWidgets.QPushButton("- 2^n")
leftPower.clicked.connect(lambda: self.stepSize(False))
self.sizeFrame.addWidget(leftPower, 0, 0)
self.sizeGroup.setLayout(self.sizeFrame)
self.grid.addWidget(self.sizeGroup, 0, 0, 1, 3)
# offset
self.offGroup = QtWidgets.QGroupBox('Offset:')
self.offFrame = QtWidgets.QGridLayout()
if self.father.current.wholeEcho():
self.posVal = int(np.floor(parent.current.len() / 2.0))
else:
self.posVal = parent.current.len()
self.posEntry = wc.QLineEdit(self.posVal, self.sizePreview)
self.offFrame.addWidget(self.posEntry, 0, 1)
self.offGroup.setLayout(self.offFrame)
self.grid.addWidget(self.offGroup, 1, 0, 1, 3)
if not self.father.current.spec():
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def stepSize(self, forward):
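        # Step the size entry to the next (forward=True) or previous (forward=False) power of two.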
inp = safeEval(self.sizeEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException('Sizing: \'Size\' input is not valid')
inp = int(round(inp))
if inp < 1:
raise SsnakeException('Sizing: \'Size\' cannot be below 1')
if forward: # If + button
new = int(np.floor(np.log2(inp)) + 1)
else:
new = int(np.ceil(np.log2(inp)) - 1)
if new < 0:
new = 0
self.sizeEntry.setText(str(2**new))
self.sizePreview()
def sizePreview(self, *args):
inp = safeEval(self.sizeEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException('Sizing: \'Size\' input is not valid')
self.sizeVal = int(round(inp))
if self.sizeVal < 1:
raise SsnakeException('Sizing: \'Size\' cannot be below 1')
self.sizeEntry.setText(str(self.sizeVal))
inp = safeEval(self.posEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException('Sizing: \'Offset\' input is not valid')
self.posVal = int(round(inp))
if self.posVal < 1:
raise SsnakeException('Sizing: \'Offset\' cannot be below 1')
self.posEntry.setText(str(self.posVal))
self.father.current.resizePreview(self.sizeVal, self.posVal)
def applyFunc(self):
inp = safeEval(self.sizeEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException('Sizing: \'Size\' input is not valid')
self.sizeVal = int(round(inp))
if self.sizeVal < 1:
raise SsnakeException('Sizing: \'Size\' cannot be below 1')
inp = safeEval(self.posEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException('Sizing: \'Offset\' input is not valid')
self.posVal = int(round(inp))
if self.posVal < 1:
raise SsnakeException('Sizing: \'Offset\' cannot be below 1')
self.father.current.resize(self.sizeVal, self.posVal)
self.father.sideframe.upd()
def picked(self, pos):
self.posEntry.setText(str(pos[0]))
self.sizePreview()
self.father.current.peakPick = True
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
##########################################################################################
class SwapEchoWindow(wc.ToolWindow):
NAME = "Swap echo"
def __init__(self, parent):
super(SwapEchoWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Echo position:"), 0, 0)
self.posVal = int(round(0.5 * parent.current.len()))
self.posEntry = wc.QLineEdit(self.posVal, self.swapEchoPreview)
self.grid.addWidget(self.posEntry, 1, 0)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def swapEchoPreview(self, *args):
inp = safeEval(self.posEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Swap echo: not a valid index")
self.posVal = int(round(inp))
self.posEntry.setText(str(self.posVal))
if self.posVal > 0 and self.posVal < self.father.current.len():
self.father.current.swapEchoPreview(self.posVal)
self.father.current.peakPick = False
def applyFunc(self):
self.father.current.peakPickReset()
inp = safeEval(self.posEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Swap echo: not a valid index")
self.posVal = int(round(inp))
self.posEntry.setText(str(self.posVal))
if self.posVal < 0 or self.posVal >= (self.father.current.len()):
raise SsnakeException("Swap echo: not a valid index")
self.father.current.swapEcho(self.posVal)
self.father.bottomframe.upd()
def picked(self, pos):
self.father.current.swapEchoPreview(pos[0])
self.posEntry.setText(str(pos[0]))
self.father.current.peakPick = False
###########################################################################
class LPSVDWindow(wc.ToolWindow):
NAME = "LPSVD"
def __init__(self, parent):
super(LPSVDWindow, self).__init__(parent)
self.specGroup = QtWidgets.QButtonGroup(self)
backwardButton = QtWidgets.QRadioButton('Backward', parent=self)
self.specGroup.addButton(backwardButton, 1)
forwardButton = QtWidgets.QRadioButton('Forward', parent=self)
self.specGroup.addButton(forwardButton, 0)
self.grid.addWidget(backwardButton, 1, 0)
self.grid.addWidget(forwardButton, 2, 0)
backwardButton.setChecked(True)
self.grid.addWidget(wc.QLabel("Number of points for analysis:"), 3, 0)
analPoints = int(np.floor(self.father.current.len() * 3 / 4.0))
self.aPointsEntry = wc.QLineEdit(analPoints)
self.grid.addWidget(self.aPointsEntry, 4, 0)
self.grid.addWidget(wc.QLabel("Max number of frequencies:"), 5, 0)
numberFreq = 20
self.nFreqEntry = wc.QLineEdit(numberFreq)
self.grid.addWidget(self.nFreqEntry, 6, 0)
self.grid.addWidget(wc.QLabel("Number of points to predict:"), 7, 0)
predictPoints = 10
self.nPredictEntry = wc.QLineEdit(predictPoints)
self.grid.addWidget(self.nPredictEntry, 8, 0)
def applyFunc(self):
analPoints = safeEval(self.aPointsEntry.text(), length=self.father.current.len(), Type='FI')
if analPoints is None:
raise SsnakeException('LPSVD: Number of points for analysis is not valid')
numberFreq = safeEval(self.nFreqEntry.text(), length=self.father.current.len(), Type='FI')
if numberFreq is None:
raise SsnakeException('LPSVD: Number of frequencies is not valid')
predictPoints = safeEval(self.nPredictEntry.text(), length=self.father.current.len(), Type='FI')
if predictPoints is None:
raise SsnakeException('LPSVD: Number of points to predict is not valid')
if analPoints > self.father.current.len():
raise SsnakeException('LPSVD: Number of points for analysis cannot be larger than data size')
if analPoints < 2:
raise SsnakeException('LPSVD: Number of points for analysis should be at least 2')
if self.specGroup.checkedId() == 0:
forward = True
else:
forward = False
self.father.current.lpsvd(predictPoints, numberFreq, forward, analPoints)
self.father.sideframe.upd()
###########################################################################
class ScaleSWWindow(wc.ToolWindow):
NAME = "Scale SW"
def __init__(self, parent):
super(ScaleSWWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Scale Factor:"), 0, 0)
self.scaleDropdown = QtWidgets.QComboBox()
self.scaleDropdown.addItems(['User Defined', 'Spin 3/2, -3Q (9/34)', 'Spin 5/2, 3Q (-12/17)', 'Spin 5/2, -5Q (12/85)', 'Spin 7/2, 3Q (-45/34)',
'Spin 7/2, 5Q (-9/34)', 'Spin 7/2, -7Q (45/476)', 'Spin 9/2, 3Q (-36/17)', 'Spin 9/2, 5Q (-36/85)', 'Spin 9/2, 7Q (-18/117)', 'Spin 9/2, -9Q (6/85)'])
self.scaleDropdown.activated.connect(self.dropdownChanged)
self.scaleList = [0, 9.0/34.0, -12.0/17.0, 12.0/85.0, -45.0/34.0, -9/34.0, 45.0/476.0, -36.0/17.0, -36.0/85.0, -18.0/117.0, 6.0/85.0]
self.grid.addWidget(self.scaleDropdown, 1, 0)
self.scaleEntry = wc.QLineEdit("0.0")
self.grid.addWidget(self.scaleEntry, 3, 0)
def dropdownChanged(self):
index = self.scaleDropdown.currentIndex()
self.scaleEntry.setText("%.9f" % self.scaleList[index])
def applyFunc(self):
scale = safeEval(self.scaleEntry.text(), length=self.father.current.len(), Type='FI')
if scale is None:
raise SsnakeException("Scale SW: Factor not a valid value")
self.father.current.scaleSw(scale)
###########################################################################
class ShiftDataWindow(wc.ToolWindow):
NAME = "Shifting data"
SINGLESLICE = True
def __init__(self, parent):
super(ShiftDataWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Data points to shift:"), 0, 0, 1, 3)
self.shiftVal = 0
self.shiftEntry = wc.QLineEdit(self.shiftVal, self.shiftPreview)
self.shiftEntry.setMinimumWidth(100)
self.grid.addWidget(self.shiftEntry, 1, 1)
leftShift = QtWidgets.QPushButton("<")
leftShift.clicked.connect(self.stepDownShift)
leftShift.setAutoRepeat(True)
self.grid.addWidget(leftShift, 1, 0)
rightShift = QtWidgets.QPushButton(">")
rightShift.clicked.connect(self.stepUpShift)
rightShift.setAutoRepeat(True)
self.grid.addWidget(rightShift, 1, 2)
def stepUpShift(self, *args):
inp = safeEval(self.shiftEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Shift data: shift value not valid")
self.shiftVal = int(round(inp))
shift = 1
if QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier and QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
shift *= 1000
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier:
shift *= 10
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
shift *= 100
self.shiftVal = self.shiftVal + shift
self.shiftEntry.setText(str(self.shiftVal))
self.shiftPreview()
def stepDownShift(self, *args):
inp = safeEval(self.shiftEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Shift data: shift value not valid")
self.shiftVal = int(round(inp))
shift = -1
if QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier and QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
shift *= 1000
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier:
shift *= 10
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
shift *= 100
self.shiftVal = self.shiftVal + shift
self.shiftEntry.setText(str(self.shiftVal))
self.shiftPreview()
def shiftPreview(self, *args):
inp = safeEval(self.shiftEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Shift data: shift value not valid")
self.shiftVal = int(round(inp))
self.shiftEntry.setText(str(self.shiftVal))
self.father.current.shiftPreview(self.shiftVal)
def applyFunc(self):
inp = safeEval(self.shiftEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Shift data: shift value not valid")
shift = int(round(inp))
self.father.current.shift(shift, (self.singleSlice.isChecked()))
###########################################################################
class RollDataWindow(wc.ToolWindow):
NAME = "Roll data"
SINGLESLICE = True
def __init__(self, parent):
super(RollDataWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Data points to roll:"), 0, 0, 1, 3)
self.shiftVal = 0
self.shiftEntry = wc.QLineEdit(self.shiftVal, self.rollPreview)
self.shiftEntry.setMinimumWidth(100)
self.grid.addWidget(self.shiftEntry, 1, 1)
leftShift = QtWidgets.QPushButton("<")
leftShift.clicked.connect(self.stepDownShift)
leftShift.setAutoRepeat(True)
self.grid.addWidget(leftShift, 1, 0)
rightShift = QtWidgets.QPushButton(">")
rightShift.clicked.connect(self.stepUpShift)
rightShift.setAutoRepeat(True)
self.grid.addWidget(rightShift, 1, 2)
def stepUpShift(self, *args):
inp = safeEval(self.shiftEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Roll data: roll value not valid")
self.shiftVal = inp
shift = 1
if QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier and QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
shift *= 1000
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier:
shift *= 10
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
shift *= 100
self.shiftVal = self.shiftVal + shift
self.shiftEntry.setText(str(self.shiftVal))
self.rollPreview()
def stepDownShift(self, *args):
inp = safeEval(self.shiftEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Roll data: roll value not valid")
self.shiftVal = inp
shift = -1
if QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier and QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
shift *= 1000
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ControlModifier:
shift *= 10
elif QtWidgets.qApp.keyboardModifiers() & QtCore.Qt.ShiftModifier:
shift *= 100
self.shiftVal = self.shiftVal + shift
self.shiftEntry.setText(str(self.shiftVal))
self.rollPreview()
def rollPreview(self, *args):
inp = safeEval(self.shiftEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Roll data: roll value not valid")
self.shiftVal = inp
self.shiftEntry.setText(str(self.shiftVal))
self.father.current.rollPreview(self.shiftVal)
def applyFunc(self):
inp = safeEval(self.shiftEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Roll data: roll value not valid")
shift = inp
self.father.current.roll(shift, (self.singleSlice.isChecked()))
#############################################################
class DCWindow(wc.ToolWindow):
NAME = "Offset correction"
SINGLESLICE = True
def __init__(self, parent):
super(DCWindow, self).__init__(parent)
self.startVal = int(round(0.8 * parent.current.len()))
self.endVal = parent.current.len()
self.grid.addWidget(wc.QLabel("Start index:"), 0, 0)
self.startEntry = wc.QLineEdit(self.startVal, self.offsetPreview)
self.grid.addWidget(self.startEntry, 1, 0)
self.grid.addWidget(wc.QLabel("End index:"), 2, 0)
self.endEntry = wc.QLineEdit(self.endVal, self.offsetPreview)
self.grid.addWidget(self.endEntry, 3, 0)
self.grid.addWidget(wc.QLabel("Offset:"), 4, 0)
val = parent.current.getdcOffset(int(round(0.8 * parent.current.len())), parent.current.len())
self.offsetEntry = wc.QLineEdit('{:.2e}'.format(val), lambda: self.offsetPreview(True))
self.grid.addWidget(self.offsetEntry, 5, 0)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def picked(self, pos, second=False):
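        # The first pick sets the start index, the second pick sets the end index, after which
        # the DC offset over that range is calculated and shown as a preview.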
dataLength = self.father.current.len()
if second:
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.startVal = int(round(inp))
if self.startVal < 0:
self.startVal = 0
elif self.startVal > dataLength:
self.startVal = dataLength
self.endVal = pos[0]
self.endEntry.setText(str(self.endVal))
if inp is not None:
self.startEntry.setText(str(self.startVal))
val = self.father.current.getdcOffset(self.startVal, self.endVal)
self.offsetEntry.setText('{:.2e}'.format(val))
self.father.current.dcOffset(val)
else:
self.offsetEntry.setText('')
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
else:
self.startEntry.setText(str(pos[0]))
inp = safeEval(self.endEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.endVal = int(round(inp))
if self.endVal < 0:
self.endVal = 0
elif self.endVal > dataLength:
self.endVal = dataLength
self.startVal = pos[0]
if inp is not None:
val = self.father.current.getdcOffset(self.startVal, self.endVal)
self.offsetEntry.setText('{:.2e}'.format(val))
else:
self.offsetEntry.setText('')
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, True)
self.father.current.peakPick = True
def offsetPreview(self, inserted=False):
if inserted:
dcVal = safeEval(self.offsetEntry.text(), length=self.father.current.len(), Type='C')
if dcVal is None:
raise SsnakeException("Offset correction: offset value not valid")
self.father.current.dcOffset(dcVal)
else:
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Offset correction: start value not valid")
self.startVal = int(round(inp))
if self.startVal < 0:
self.startVal = 0
elif self.startVal > dataLength:
self.startVal = dataLength
self.startEntry.setText(str(self.startVal))
inp = safeEval(self.endEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Offset correction: end value not valid")
self.endVal = int(round(inp))
if self.endVal < 0:
self.endVal = 0
elif self.endVal > dataLength:
self.endVal = dataLength
self.endEntry.setText(str(self.endVal))
val = self.father.current.getdcOffset(self.startVal, self.endVal)
self.offsetEntry.setText('{:.2e}'.format(val))
self.father.current.dcOffset(val)
def applyFunc(self):
inp = safeEval(self.offsetEntry.text(), length=self.father.current.len(), Type='C')
if inp is None:
raise SsnakeException("Offset correction: offset value not valid")
self.father.current.peakPickReset()
self.father.current.subtract([inp], self.singleSlice.isChecked())
#############################################################
class BaselineWindow(wc.ToolWindow):
NAME = "Baseline correction"
SINGLESLICE = True
TYPES = ['poly','sin/cos']
TYP_NAMES = ['Polynomial','sine/cosine']
def __init__(self, parent):
super(BaselineWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Type:"), 0, 0, 1, 2)
self.typeDropdown = QtWidgets.QComboBox()
self.typeDropdown.addItems(self.TYP_NAMES)
self.grid.addWidget(self.typeDropdown, 1, 0, 1, 2)
self.grid.addWidget(wc.QLabel("Degree:"), 2, 0, 1, 2)
self.removeList = []
self.degreeEntry = wc.SsnakeSpinBox()
self.degreeEntry.setMaximum(10000)
self.degreeEntry.setMinimum(1)
self.degreeEntry.setValue(3)
self.degreeEntry.setAlignment(QtCore.Qt.AlignCenter)
self.grid.addWidget(self.degreeEntry, 3, 0, 1, 2)
self.invertButton = QtWidgets.QCheckBox("Invert selection")
self.invertButton.stateChanged.connect(self.preview)
self.grid.addWidget(self.invertButton, 4, 0, 1, 2)
self.allFitButton = QtWidgets.QCheckBox("Fit traces separately")
self.grid.addWidget(self.allFitButton, 5, 0, 1, 2)
resetButton = QtWidgets.QPushButton("&Reset")
resetButton.clicked.connect(self.reset)
self.grid.addWidget(resetButton, 6, 0)
fitButton = QtWidgets.QPushButton("&Fit")
fitButton.clicked.connect(self.preview)
self.grid.addWidget(fitButton, 6, 1)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def picked(self, pos):
self.removeList.append(pos[0])
self.father.current.previewRemoveList(self.removeList, invert=self.invertButton.isChecked())
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def preview(self, *args):
type = self.TYPES[self.typeDropdown.currentIndex()]
inp = self.degreeEntry.value()
self.father.current.previewRemoveList(self.removeList, invert=self.invertButton.isChecked())
self.father.current.previewBaselineCorrection(inp, self.removeList, type, invert=self.invertButton.isChecked())
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def reset(self, *args):
self.removeList = []
self.father.current.resetPreviewRemoveList()
self.preview()
def closeEvent(self, *args):
self.father.current.removeListLines = []
del self.father.current.removeListLines
super(BaselineWindow, self).closeEvent(*args)
def applyFunc(self):
inp = self.degreeEntry.value()
type = self.TYPES[self.typeDropdown.currentIndex()]
if self.allFitButton.isChecked():
self.father.current.baselineCorrectionAll(inp, self.removeList, type, invert=self.invertButton.isChecked())
else:
self.father.current.baselineCorrection(inp, self.removeList, type, self.singleSlice.isChecked(), invert=self.invertButton.isChecked())
self.father.current.peakPickReset()
self.father.current.resetPreviewRemoveList()
#############################################################
class regionWindow(wc.ToolWindow):
def __init__(self, parent, name):
self.NAME = name
super(regionWindow, self).__init__(parent)
self.startVal = [0] # dummy variables
self.endVal = [parent.current.len()] # dummy variables
self.grid.addWidget(wc.QLabel("Start index:"), 0, 0)
self.grid.addWidget(wc.QLabel("End index:"), 0, 1)
self.startEntry = []
self.endEntry = []
self.deleteButton = []
self.partIter = 0
self.entryCount = 1
self.first = True
self.startEntry.append(wc.QLineEdit(""))
self.startEntry[0].editingFinished.connect(lambda self=self, tmp=self.startEntry[0]: self.setVal(tmp, True))
self.grid.addWidget(self.startEntry[0], 1, 0)
self.endEntry.append(wc.QLineEdit(""))
self.endEntry[0].editingFinished.connect(lambda self=self, tmp=self.endEntry[0]: self.setVal(tmp, False))
self.grid.addWidget(self.endEntry[0], 1, 1)
self.deleteButton.append(QtWidgets.QPushButton("X"))
self.deleteButton[0].clicked.connect(lambda extra, self=self: self.deleteEntry(self.deleteButton[0]))
self.grid.addWidget(self.deleteButton[0], 1, 2)
self.newSpec = QtWidgets.QCheckBox("Result in new workspace")
self.layout.addWidget(self.newSpec, 1, 0, 1, 2)
self.grid.setRowStretch(100, 1)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def addValue(self, pos):
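        # The first pick fills the start entry of the current row; the second pick completes the
        # row (ordered so that start <= end) and appends an empty row for the next region.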
if self.first:
self.startVal[self.partIter] = pos
self.startEntry[self.partIter].setText(str(pos))
self.first = False
else:
tmp = self.startVal[self.partIter]
self.startVal[self.partIter] = min(pos, tmp)
self.endVal[self.partIter] = max(pos, tmp)
self.startVal = np.append(self.startVal, 0)
self.endVal = np.append(self.endVal, self.father.current.len())
self.startEntry[self.partIter].setText(str(self.startVal[self.partIter]))
self.endEntry[self.partIter].setText(str(self.endVal[self.partIter]))
self.partIter += 1
self.startEntry.append(wc.QLineEdit())
self.startEntry[self.partIter].editingFinished.connect(lambda self=self, tmp=self.startEntry[self.partIter]: self.setVal(tmp, True))
self.grid.addWidget(self.startEntry[self.partIter], 1 + self.entryCount, 0)
self.endEntry.append(wc.QLineEdit())
self.endEntry[self.partIter].editingFinished.connect(lambda self=self, tmp=self.endEntry[self.partIter]: self.setVal(tmp, False))
self.grid.addWidget(self.endEntry[self.partIter], 1 + self.entryCount, 1)
self.deleteButton.append(QtWidgets.QPushButton("X"))
self.deleteButton[self.partIter].clicked.connect(lambda extra, self=self, tmp=self.deleteButton[self.partIter]: self.deleteEntry(tmp))
self.grid.addWidget(self.deleteButton[self.partIter], 1 + self.entryCount, 2)
self.entryCount += 1
self.first = True
def deleteEntry(self, button=None, num=None):
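        # Remove a region row and its widgets; the active (last) row is only cleared, not removed.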
if num is None:
num = self.deleteButton.index(button)
if num == self.partIter:
self.startVal[num] = 0
self.endVal[num] = self.father.current.len()
self.startEntry[num].setText("")
self.endEntry[num].setText("")
self.first = True
return
self.grid.removeWidget(self.endEntry[num])
self.grid.removeWidget(self.startEntry[num])
self.grid.removeWidget(self.deleteButton[num])
self.endEntry[num].deleteLater()
self.startEntry[num].deleteLater()
self.deleteButton[num].deleteLater()
self.endEntry.pop(num)
self.startEntry.pop(num)
self.deleteButton.pop(num)
self.startVal = np.delete(self.startVal, num)
self.endVal = np.delete(self.endVal, num)
self.partIter -= 1
def picked(self, pos):
self.addValue(pos[0])
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def setVal(self, entry, isMin=False):
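        # Parse a manually edited start/end entry, clamp it to the data length, keep the pair
        # ordered, and append a new empty row when the last row is being filled in.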
inp = safeEval(entry.text(), length=self.father.current.len(), Type='FI')
error = False
if inp is not None:
inp = int(inp)
if inp < 0:
inp = 0
if inp > self.father.current.len():
inp = self.father.current.len()
if isMin:
num = self.startEntry.index(entry)
if inp is None:
self.startVal[num] = -1 # If the input is wrong, use -1 as a placeholder for it in the value list
self.father.father.dispMsg(self.NAME + ": wrong input")
error = True
elif self.endVal[num] == -1:
self.startVal[num] = inp
else:
self.startVal[num] = min(inp, self.endVal[num])
self.endVal[num] = max(inp, self.endVal[num])
else:
num = self.endEntry.index(entry)
if inp is None:
self.endVal[num] = -1
self.father.father.dispMsg(self.NAME + ": wrong input")
error = True
elif self.startVal[num] == -1:
self.endVal[num] = inp
else:
self.endVal[num] = max(inp, self.startVal[num])
self.startVal[num] = min(inp, self.startVal[num])
if num == self.partIter:
self.partIter += 1
self.startVal = np.append(self.startVal, 0)
self.endVal = np.append(self.endVal, self.father.current.len())
self.startEntry.append(wc.QLineEdit())
self.startEntry[self.partIter].editingFinished.connect(lambda self=self, tmp=self.startEntry[self.partIter]: self.setVal(tmp, True))
self.grid.addWidget(self.startEntry[self.partIter], 1 + self.entryCount, 0)
self.endEntry.append(wc.QLineEdit())
self.endEntry[self.partIter].editingFinished.connect(lambda self=self, tmp=self.endEntry[self.partIter]: self.setVal(tmp, False))
self.grid.addWidget(self.endEntry[self.partIter], 1 + self.entryCount, 1)
self.deleteButton.append(QtWidgets.QPushButton("X"))
self.deleteButton[self.partIter].clicked.connect(lambda extra, self=self, tmp=self.deleteButton[self.partIter]: self.deleteEntry(tmp))
self.grid.addWidget(self.deleteButton[self.partIter], 1 + self.entryCount, 2)
self.entryCount += 1
self.first = True
if error: # Return only after partIter check
return
if self.startVal[num] != -1: # Only if the input is OK, reprint it
self.startEntry[num].setText(str(self.startVal[num]))
if self.endVal[num] != -1:
self.endEntry[num].setText(str(self.endVal[num]))
def apply(self, maximum, minimum, newSpec):
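        # Stub: overridden by the subclasses (integrate, sum, max, ...) to perform the actual operation.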
pass
def applyFunc(self):
if self.partIter == 0:
self.apply(np.array([0]), np.array([self.father.current.len()]), self.newSpec.isChecked())
else:
self.apply(self.startVal[:self.partIter], self.endVal[:self.partIter], self.newSpec.isChecked())
############################################################
class integrateWindow(regionWindow):
def __init__(self, parent):
super(integrateWindow, self).__init__(parent, 'Integrate')
def apply(self, maximum, minimum, newSpec):
if np.any(maximum < 0) or np.any(minimum < 0): # Check for errors in the inputs
raise SsnakeException(self.NAME + ": wrong input")
if newSpec:
self.father.father.newWorkspace(self.father.current.integrate(minimum, maximum, newSpec))
else:
self.father.current.integrate(minimum, maximum, newSpec)
self.father.updAllFrames()
############################################################
class sumWindow(regionWindow):
def __init__(self, parent):
super(sumWindow, self).__init__(parent, 'Sum')
def apply(self, maximum, minimum, newSpec):
if np.any(maximum < 0) or np.any(minimum < 0):
raise SsnakeException(self.NAME + ": wrong input")
if newSpec:
self.father.father.newWorkspace(self.father.current.sum(minimum, maximum, newSpec))
else:
self.father.current.sum(minimum, maximum, newSpec)
self.father.updAllFrames()
############################################################
class maxWindow(regionWindow):
def __init__(self, parent):
super(maxWindow, self).__init__(parent, 'Max')
def apply(self, maximum, minimum, newSpec):
if np.any(maximum < 0) or np.any(minimum < 0):
raise SsnakeException(self.NAME + ": wrong input")
if newSpec:
self.father.father.newWorkspace(self.father.current.max(minimum, maximum, newSpec))
else:
self.father.current.max(minimum, maximum, newSpec)
self.father.updAllFrames()
############################################################
class minWindow(regionWindow):
def __init__(self, parent):
super(minWindow, self).__init__(parent, 'Min')
def apply(self, maximum, minimum, newSpec):
if np.any(maximum < 0) or np.any(minimum < 0):
raise SsnakeException(self.NAME + ": wrong input")
if newSpec:
self.father.father.newWorkspace(self.father.current.min(minimum, maximum, newSpec))
else:
self.father.current.min(minimum, maximum, newSpec)
self.father.updAllFrames()
############################################################
class argmaxWindow(regionWindow):
def __init__(self, parent):
super(argmaxWindow, self).__init__(parent, 'Max position')
def apply(self, maximum, minimum, newSpec):
if np.any(maximum < 0) or np.any(minimum < 0):
raise SsnakeException(self.NAME + ": wrong input")
if newSpec:
self.father.father.newWorkspace(self.father.current.argmax(minimum, maximum, newSpec))
else:
self.father.current.argmax(minimum, maximum, newSpec)
self.father.updAllFrames()
############################################################
class argminWindow(regionWindow):
def __init__(self, parent):
super(argminWindow, self).__init__(parent, 'Min position')
def apply(self, maximum, minimum, newSpec):
if np.any(maximum < 0) or np.any(minimum < 0):
raise SsnakeException(self.NAME + ": wrong input")
if newSpec:
self.father.father.newWorkspace(self.father.current.argmin(minimum, maximum, newSpec))
else:
self.father.current.argmin(minimum, maximum, newSpec)
self.father.updAllFrames()
############################################################
class avgWindow(regionWindow):
def __init__(self, parent):
super(avgWindow, self).__init__(parent, 'Average')
def apply(self, maximum, minimum, newSpec):
if np.any(maximum < 0) or np.any(minimum < 0):
raise SsnakeException(self.NAME + ": wrong input")
if newSpec:
self.father.father.newWorkspace(self.father.current.average(minimum, maximum, newSpec))
else:
self.father.current.average(minimum, maximum, newSpec)
self.father.updAllFrames()
#############################################################
class regionWindow2(wc.ToolWindow):
def __init__(self, parent, name, newSpecOption):
self.NAME = name
super(regionWindow2, self).__init__(parent)
self.startVal = 0
self.endVal = parent.current.len()
self.grid.addWidget(wc.QLabel("Start index:"), 0, 0)
self.startEntry = wc.QLineEdit(self.startVal, self.checkValues)
self.grid.addWidget(self.startEntry, 1, 0)
self.grid.addWidget(wc.QLabel("End index:"), 2, 0)
self.endEntry = wc.QLineEdit(self.endVal, self.checkValues)
self.grid.addWidget(self.endEntry, 3, 0)
self.newSpec = QtWidgets.QCheckBox("Result in new workspace")
if not newSpecOption:
self.newSpec.hide()
self.layout.addWidget(self.newSpec, 1, 0, 1, 2)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def preview(self, maximum, minimum):
pass
def picked(self, pos, second=False):
if second:
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.startVal = int(round(inp))
if self.startVal < 0:
self.startVal = 0
elif self.startVal > dataLength:
self.startVal = dataLength
self.startEntry.setText(str(self.startVal))
self.endVal = pos[0]
self.endEntry.setText(str(self.endVal))
self.preview(self.startVal, self.endVal)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
else:
self.startEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, True)
self.father.current.peakPick = True
def checkValues(self, *args):
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.startVal = int(round(inp))
if self.startVal < 0:
self.startVal = 0
elif self.startVal > dataLength:
self.startVal = dataLength
self.startEntry.setText(str(self.startVal))
inp = safeEval(self.endEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.endVal = int(round(inp))
if self.endVal < 0:
self.endVal = 0
elif self.endVal > dataLength:
self.endVal = dataLength
self.endEntry.setText(str(self.endVal))
self.preview(self.startVal, self.endVal)
def applyFunc(self):
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException(self.NAME + ": value not valid")
inp = safeEval(self.endEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException(self.NAME + ": value not valid")
self.checkValues(self)
self.apply(self.startVal, self.endVal, self.newSpec.isChecked())
def apply(self, maximum, minimum, newSpec):
pass
#############################################################
class regionWindowStep(wc.ToolWindow):
def __init__(self, parent, name, newSpecOption):
self.NAME = name
super(regionWindowStep, self).__init__(parent)
self.startVal = 0
self.endVal = parent.current.len()
        self.stepVal = 1
self.grid.addWidget(wc.QLabel("Start index:"), 0, 0)
self.startEntry = wc.QLineEdit(self.startVal, self.checkValues)
self.grid.addWidget(self.startEntry, 1, 0)
self.grid.addWidget(wc.QLabel("End index:"), 2, 0)
self.endEntry = wc.QLineEdit(self.endVal, self.checkValues)
self.grid.addWidget(self.endEntry, 3, 0)
self.grid.addWidget(wc.QLabel("Step size:"), 4, 0)
self.stepEntry = wc.QLineEdit(self.stepVal, self.checkValues)
self.grid.addWidget(self.stepEntry, 5, 0)
self.newSpec = QtWidgets.QCheckBox("Result in new workspace")
if not newSpecOption:
self.newSpec.hide()
self.layout.addWidget(self.newSpec, 1, 0, 1, 2)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def preview(self, maximum, minimum, step):
pass
def picked(self, pos, second=False):
if second:
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.startVal = int(round(inp))
if self.startVal < 0:
self.startVal = 0
elif self.startVal > dataLength:
self.startVal = dataLength
self.startEntry.setText(str(self.startVal))
self.endVal = pos[0]
self.endEntry.setText(str(self.endVal))
self.preview(self.startVal, self.endVal, self.stepVal)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
else:
self.startEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, True)
self.father.current.peakPick = True
def checkValues(self, *args):
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.startVal = int(round(inp))
if self.startVal < 0:
self.startVal = 0
elif self.startVal > dataLength:
self.startVal = dataLength
self.startEntry.setText(str(self.startVal))
inp = safeEval(self.endEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.endVal = int(round(inp))
if self.endVal < 0:
self.endVal = 0
elif self.endVal > dataLength:
self.endVal = dataLength
self.endEntry.setText(str(self.endVal))
inp = safeEval(self.stepEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.stepVal = int(round(inp))
if self.stepVal <= 0:
self.stepVal = 1
elif self.stepVal > dataLength:
self.stepVal = dataLength
self.stepEntry.setText(str(self.stepVal))
self.preview(self.startVal, self.endVal, self.stepVal)
def applyFunc(self):
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException(self.NAME + ": value not valid")
inp = safeEval(self.endEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException(self.NAME + ": value not valid")
inp = safeEval(self.stepEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException(self.NAME + ": value not valid")
self.checkValues(self)
self.apply(self.startVal, self.endVal, self.newSpec.isChecked(), self.stepVal)
def apply(self, maximum, minimum, newSpec, step):
pass
############################################################
class extractRegionWindow(regionWindowStep):
def __init__(self, parent):
super(extractRegionWindow, self).__init__(parent, 'Extract part', True)
def apply(self, maximum, minimum, newSpec, step):
if newSpec:
if self.father.father.newWorkspace(self.father.current.extract(minimum, maximum, newSpec, step)) is None:
return None
else:
self.father.current.extract(minimum, maximum, newSpec, step)
self.father.updAllFrames()
return 1
############################################################
class SubtractAvgWindow(regionWindow2):
def __init__(self, parent):
super(SubtractAvgWindow, self).__init__(parent, 'Subtract Avg', False)
def apply(self, maximum, minimum, newSpec):
self.father.current.subtractAvg(maximum, minimum)
self.father.updAllFrames()
return 1
def preview(self, maximum, minimum):
self.father.current.subtractAvgPreview(maximum, minimum)
############################################################
class AlignDataWindow(regionWindow2):
def __init__(self, parent):
super(AlignDataWindow, self).__init__(parent, 'Align Maxima', False)
def apply(self, maximum, minimum, newSpec):
self.father.current.align(maximum, minimum)
self.father.updAllFrames()
return 1
# def preview(self, maximum, minimum):
# self.father.current.subtractAvgPreview(maximum, minimum)
#############################################################
class FiddleWindow(wc.ToolWindow):
NAME = "Reference deconvolution"
def __init__(self, parent):
super(FiddleWindow, self).__init__(parent)
self.startVal = 0
self.endVal = parent.current.len()
self.grid.addWidget(wc.QLabel("Start index:"), 0, 0)
self.startEntry = wc.QLineEdit(self.startVal, self.checkValues)
self.grid.addWidget(self.startEntry, 1, 0)
self.grid.addWidget(wc.QLabel("End index:"), 2, 0)
self.endEntry = wc.QLineEdit(self.endVal, self.checkValues)
self.grid.addWidget(self.endEntry, 3, 0)
self.grid.addWidget(wc.QLabel("Linebroadening [Hz]:"), 4, 0)
self.lbEntry = wc.QLineEdit("1.0", self.checkValues)
self.grid.addWidget(self.lbEntry, 5, 0)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def picked(self, pos, second=False):
if second:
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.startVal = int(round(inp))
if self.startVal < 0:
self.startVal = 0
elif self.startVal > dataLength:
self.startVal = dataLength
self.startEntry.setText(str(self.startVal))
self.endVal = pos[0]
self.endEntry.setText(str(self.endVal))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
else:
self.startEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, True)
self.father.current.peakPick = True
def checkValues(self, *args):
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.startVal = int(round(inp))
if self.startVal < 0:
self.startVal = 0
elif self.startVal > dataLength:
self.startVal = dataLength
self.startEntry.setText(str(self.startVal))
inp = safeEval(self.endEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.endVal = int(round(inp))
if self.endVal < 0:
self.endVal = 0
elif self.endVal > dataLength:
self.endVal = dataLength
self.endEntry.setText(str(self.endVal))
inp = safeEval(self.lbEntry.text(), length=self.father.current.len(), Type='FI')
if inp is not None:
self.lbEntry.setText(str(inp))
def applyFunc(self):
dataLength = self.father.current.len()
inp = safeEval(self.startEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Reference deconv: start entry not valid")
self.startVal = int(round(inp))
if self.startVal < 0:
self.startVal = 0
elif self.startVal > dataLength:
self.startVal = dataLength
inp = safeEval(self.endEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Reference deconv: end entry not valid")
self.endVal = int(round(inp))
if self.endVal < 0:
self.endVal = 0
elif self.endVal > dataLength:
self.endVal = dataLength
lb = safeEval(self.lbEntry.text(), length=self.father.current.len(), Type='FI')
if lb is None:
raise SsnakeException("Reference deconv: Linebroadening entry not valid")
self.father.current.fiddle(self.startVal, self.endVal, lb)
##############################################################
class DeleteWindow(wc.ToolWindow):
NAME = "Delete"
def __init__(self, parent):
super(DeleteWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Indexes to delete:"), 0, 0)
self.delEntry = wc.QLineEdit('0', self.preview)
self.grid.addWidget(self.delEntry, 1, 0)
def preview(self, *args):
length = int(self.father.current.len())
pos = safeEval(self.delEntry.text(), length=self.father.current.len())
if pos is None:
raise SsnakeException('Delete: not all values are valid indexes to delete')
pos = np.array(pos)
pos[pos < 0] = pos[pos < 0] + length
if (pos < 0).any() or (pos >= length).any():
raise SsnakeException('Delete: not all values are valid indexes to delete')
self.father.current.deletePreview(pos)
def applyFunc(self):
length = self.father.current.len()
pos = safeEval(self.delEntry.text(), length=self.father.current.len())
if pos is None:
raise SsnakeException('Delete: not all values are valid indexes to delete')
if isinstance(pos, (int, float)):
pos = np.array([pos])
else:
pos = np.array(pos)
pos[pos < 0] = pos[pos < 0] + length
if (pos < 0).any() or (pos >= length).any():
raise SsnakeException('Delete: not all values are valid indexes to delete')
self.father.current.delete(pos)
##############################################################
class SplitWindow(wc.ToolWindow):
NAME = "Split"
def __init__(self, parent):
super(SplitWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Sections:"), 0, 0)
self.splitEntry = wc.QLineEdit('1', self.preview)
self.grid.addWidget(self.splitEntry, 1, 0)
def preview(self, *args):
val = safeEval(self.splitEntry.text(), length=self.father.current.len(), Type='FI')
if val is None:
raise SsnakeException("Split: input not valid")
self.splitEntry.setText(str(int(round(val))))
    def applyFunc(self):
        val = safeEval(self.splitEntry.text(), length=self.father.current.len(), Type='FI')
        if val is None:
            raise SsnakeException("Split: input not valid")
        val = int(round(val))
        if val <= 0:
            raise SsnakeException("Split: input not valid")
        self.father.current.split(val)
##############################################################
class ConcatenateWindow(wc.ToolWindow):
NAME = "Concatenate"
def __init__(self, parent):
super(ConcatenateWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Concatenation axes:"), 0, 0)
self.axesEntry = QtWidgets.QComboBox()
self.axesEntry.addItems(np.array(np.arange(self.father.current.data.ndim() - 1) + 1, dtype=str))
self.grid.addWidget(self.axesEntry, 1, 0)
def applyFunc(self):
self.father.current.concatenate(self.axesEntry.currentIndex())
##############################################################
class InsertWindow(wc.ToolWindow):
NAME = "Insert"
def __init__(self, parent):
super(InsertWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Start insert at index:"), 0, 0)
self.posEntry = wc.QLineEdit(self.father.current.len(), self.preview)
self.grid.addWidget(self.posEntry, 1, 0)
self.grid.addWidget(wc.QLabel("Workspace to insert:"), 2, 0)
self.wsEntry = QtWidgets.QComboBox()
self.wsEntry.addItems(self.father.father.workspaceNames)
self.grid.addWidget(self.wsEntry, 3, 0)
def preview(self, *args):
pos = safeEval(self.posEntry.text(), length=self.father.current.len(), Type='FI')
if pos is None:
return
pos = int(round(pos))
if pos > self.father.current.len():
pos = self.father.current.len()
elif pos < 0:
pos = 0
self.posEntry.setText(str(pos))
def applyFunc(self):
pos = safeEval(self.posEntry.text(), length=self.father.current.len(), Type='FI')
if pos is None:
raise SsnakeException("Not a valid value")
pos = int(round(pos))
if pos > self.father.current.len():
pos = self.father.current.len()
elif pos < 0:
pos = 0
ws = self.wsEntry.currentIndex()
self.father.current.insert(self.father.father.workspaces[ws].masterData.getData(), pos)
##############################################################
class CombineWindow(wc.ToolWindow):
SINGLESLICE = True
RESIZABLE = True
def __init__(self, parent, combType):
super(CombineWindow, self).__init__(parent)
self.combType = combType # 0 = add, 1 = subtract, 2 = multiply, 3 = divide
if self.combType == 0:
self.WindowTitle = "Add"
self.grid.addWidget(wc.QLabel("Workspace to add:"), 0, 0)
elif self.combType == 1:
self.WindowTitle = "Subtract"
self.grid.addWidget(wc.QLabel("Workspace to subtract:"), 0, 0)
elif self.combType == 2:
self.WindowTitle = "Multiply"
self.grid.addWidget(wc.QLabel("Workspace to multiply:"), 0, 0)
elif self.combType == 3:
self.WindowTitle = "Divide"
self.grid.addWidget(wc.QLabel("Workspace to divide:"), 0, 0)
self.setWindowTitle(self.WindowTitle)
self.wsEntry = QtWidgets.QComboBox()
self.wsEntry.addItems(self.father.father.workspaceNames)
self.grid.addWidget(self.wsEntry, 1, 0)
def applyFunc(self):
ws = self.wsEntry.currentIndex()
if self.combType == 0:
returnValue = self.father.current.add(self.father.father.workspaces[ws].masterData.getData(), self.singleSlice.isChecked())
elif self.combType == 1:
returnValue = self.father.current.subtract(self.father.father.workspaces[ws].masterData.getData(), self.singleSlice.isChecked())
elif self.combType == 2:
returnValue = self.father.current.multiply(self.father.father.workspaces[ws].masterData.getData(), self.singleSlice.isChecked())
elif self.combType == 3:
returnValue = self.father.current.divide(self.father.father.workspaces[ws].masterData.getData(), self.singleSlice.isChecked())
##############################################################
class SNWindow(wc.ToolWindow):
NAME = "Signal to noise"
CANCELNAME = "&Close"
OKNAME = "C&alc"
APPLYANDCLOSE = False
def __init__(self, parent):
super(SNWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Start index noise:"), 0, 0)
self.minNoiseEntry = wc.QLineEdit('0', self.checkValues)
self.grid.addWidget(self.minNoiseEntry, 1, 0)
self.grid.addWidget(wc.QLabel("End index noise:"), 2, 0)
self.maxNoiseEntry = wc.QLineEdit(parent.current.len(), self.checkValues)
self.grid.addWidget(self.maxNoiseEntry, 3, 0)
self.grid.addWidget(wc.QLabel("Start index signal:"), 4, 0)
self.minEntry = wc.QLineEdit('0', self.checkValues)
self.grid.addWidget(self.minEntry, 5, 0)
self.grid.addWidget(wc.QLabel("End index signal:"), 6, 0)
self.maxEntry = wc.QLineEdit(parent.current.len(), self.checkValues)
self.grid.addWidget(self.maxEntry, 7, 0)
self.grid.addWidget(wc.QLabel("S/N:"), 8, 0)
self.snEntry = wc.QLineEdit("0.0")
self.grid.addWidget(self.snEntry, 9, 0)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def picked(self, pos, num=0):
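        # Successive peak picks fill the noise start/end and signal start/end entries
        # in turn; the S/N value is recalculated after each pick.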
if num == 0:
self.minNoiseEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 1)
self.father.current.peakPick = True
elif num == 1:
self.maxNoiseEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 2)
self.father.current.peakPick = True
elif num == 2:
self.minEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 3)
self.father.current.peakPick = True
elif num == 3:
self.maxEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 0)
self.father.current.peakPick = True
self.applyFunc()
def checkValues(self, *args):
dataLength = self.father.current.len()
inp = safeEval(self.minNoiseEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
return
minimum = int(round(inp))
if minimum < 0:
minimum = 0
elif minimum > dataLength:
minimum = dataLength
self.minNoiseEntry.setText(str(minimum))
inp = safeEval(self.maxNoiseEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
return
maximum = int(round(inp))
if maximum < 0:
maximum = 0
elif maximum > dataLength:
maximum = dataLength
self.maxNoiseEntry.setText(str(maximum))
inp = safeEval(self.minEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
return
minimum = int(round(inp))
if minimum < 0:
minimum = 0
elif minimum > dataLength:
minimum = dataLength
self.minEntry.setText(str(minimum))
inp = safeEval(self.maxEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
return
maximum = int(round(inp))
if maximum < 0:
maximum = 0
elif maximum > dataLength:
maximum = dataLength
self.maxEntry.setText(str(maximum))
self.applyFunc()
def applyFunc(self):
dataLength = self.father.current.len()
inp = safeEval(self.minNoiseEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("S/N: invalid range")
minimumNoise = int(round(inp))
if minimumNoise < 0:
minimumNoise = 0
elif minimumNoise > dataLength:
minimumNoise = dataLength
self.minNoiseEntry.setText(str(minimumNoise))
inp = safeEval(self.maxNoiseEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("S/N: invalid range")
maximumNoise = int(round(inp))
if maximumNoise < 0:
maximumNoise = 0
elif maximumNoise > dataLength:
maximumNoise = dataLength
self.maxNoiseEntry.setText(str(maximumNoise))
inp = safeEval(self.minEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("S/N: invalid range")
minimum = int(round(inp))
if minimum < 0:
minimum = 0
elif minimum > dataLength:
minimum = dataLength
self.minEntry.setText(str(minimum))
inp = safeEval(self.maxEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("S/N: invalid range")
maximum = int(round(inp))
if maximum < 0:
maximum = 0
elif maximum > dataLength:
maximum = dataLength
self.maxEntry.setText(str(maximum))
self.snEntry.setText(str(self.father.current.SN(minimumNoise, maximumNoise, minimum, maximum)))
##############################################################
class FWHMWindow(wc.ToolWindow):
NAME = "FWHM"
CANCELNAME = "&Close"
OKNAME = "C&alc"
APPLYANDCLOSE = False
def __init__(self, parent):
super(FWHMWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Start index:"), 0, 0)
self.minEntry = wc.QLineEdit('0', self.checkValues)
self.grid.addWidget(self.minEntry, 1, 0)
self.grid.addWidget(wc.QLabel("End index:"), 2, 0)
self.maxEntry = wc.QLineEdit(parent.current.len(), self.checkValues)
self.grid.addWidget(self.maxEntry, 3, 0)
self.grid.addWidget(wc.QLabel("Units:"), 4, 0)
unitSelect = self.father.current.getAxType()
if self.father.current.spec() == 1:
unitList = ['Hz', 'kHz', 'MHz', 'ppm']
if self.father.current.getppm():
unitSelect = 3
else:
unitList = ['s', 'ms', u'μs']
self.unitDrop = QtWidgets.QComboBox()
self.unitDrop.addItems(unitList)
self.unitDrop.setCurrentIndex(unitSelect)
self.unitDrop.currentIndexChanged.connect(self.checkValues)
self.grid.addWidget(self.unitDrop, 5, 0)
self.grid.addWidget(wc.QLabel(u"FWHM:"), 6, 0)
self.fwhmEntry = wc.QLineEdit("0.0")
self.grid.addWidget(self.fwhmEntry, 7, 0)
self.grid.addWidget(wc.QLabel(u"0.55%:"), 8, 0)
self.zffEntry = wc.QLineEdit("0.0")
self.grid.addWidget(self.zffEntry, 9, 0)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def picked(self, pos, num=0):
if num == 0:
self.minEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 1)
self.father.current.peakPick = True
elif num == 1:
self.maxEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 0)
self.father.current.peakPick = True
self.applyFunc()
def checkValues(self, *args):
dataLength = self.father.current.len()
inp = safeEval(self.minEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
return
minimum = int(round(inp))
if minimum < 0:
minimum = 0
elif minimum > dataLength:
minimum = dataLength
self.minEntry.setText(str(minimum))
inp = safeEval(self.maxEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
return
maximum = int(round(inp))
if maximum < 0:
maximum = 0
elif maximum > dataLength:
maximum = dataLength
self.maxEntry.setText(str(maximum))
self.applyFunc()
def applyFunc(self):
dataLength = self.father.current.len()
inp = safeEval(self.minEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("FWHM: invalid range")
minimum = int(round(inp))
if minimum < 0:
minimum = 0
elif minimum > dataLength:
minimum = dataLength
self.minEntry.setText(str(minimum))
inp = safeEval(self.maxEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("FWHM: invalid range")
maximum = int(round(inp))
if maximum < 0:
maximum = 0
elif maximum > dataLength:
maximum = dataLength
self.maxEntry.setText(str(maximum))
self.fwhmEntry.setText(str(self.father.current.fwhm(minimum, maximum, 0.5, self.unitDrop.currentIndex())))
self.zffEntry.setText(str(self.father.current.fwhm(minimum, maximum, 0.0055, self.unitDrop.currentIndex())))
##############################################################
class COMWindow(wc.ToolWindow): # Centre of Mass Window
NAME = "Centre of Mass"
CANCELNAME = "&Close"
OKNAME = "C&alc"
APPLYANDCLOSE = False
def __init__(self, parent):
super(COMWindow, self).__init__(parent)
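        # Pick in two dimensions (X and Y) on contour plots, in one dimension otherwise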
self.pickDim = 1
if isinstance(self.father.current, views.CurrentContour):
self.pickDim = 2
self.grid.addWidget(wc.QLabel("X axis:"), 0, 0, 1, 2)
self.grid.addWidget(wc.QLabel("Start:"), 1, 0)
self.grid.addWidget(wc.QLabel("End:"), 2, 0)
if self.pickDim == 2:
unitSelectY = self.father.current.getAxType(-2)
if self.father.current.spec(-2) == 1:
unitListY = ['Hz', 'kHz', 'MHz', 'ppm']
if self.father.current.getppm(-2):
unitSelectY = 3
else:
unitListY = ['s', 'ms', u'μs']
self.grid.addWidget(wc.QLabel("Y axis:"), 3, 0, 1, 2)
self.grid.addWidget(wc.QLabel("Start:"), 4, 0)
self.grid.addWidget(wc.QLabel("End:"), 5, 0)
self.minEntryY = wc.QLineEdit("0", lambda: self.applyFunc(False))
self.grid.addWidget(self.minEntryY, 4, 1)
self.maxEntryY = wc.QLineEdit(parent.current.len(-2), lambda: self.applyFunc(False))
self.grid.addWidget(self.maxEntryY, 5, 1)
self.grid.addWidget(wc.QLabel("Y Unit:"), 11, 0)
self.unitDropY = QtWidgets.QComboBox()
self.unitDropY.addItems(unitListY)
self.unitDropY.setCurrentIndex(unitSelectY)
self.unitDropY.currentIndexChanged.connect(lambda: self.applyFunc(True))
self.grid.addWidget(self.unitDropY, 11, 1)
self.comEntryY = wc.QLineEdit("0.0")
self.grid.addWidget(wc.QLabel(u"Y COM:"), 12, 0)
self.grid.addWidget(self.comEntryY, 12, 1)
        unitSelect = self.father.current.getAxType()  # default unit; may be overridden to ppm below
        if self.father.current.spec() == 1:
            unitList = ['Hz', 'kHz', 'MHz', 'ppm']
            if self.father.current.getppm():
                unitSelect = 3
        else:
            unitList = ['s', 'ms', u'μs']
        self.minEntry = wc.QLineEdit("0", lambda: self.applyFunc(False))
        self.grid.addWidget(self.minEntry, 1, 1)
        self.maxEntry = wc.QLineEdit(parent.current.len(), lambda: self.applyFunc(False))
        self.grid.addWidget(self.maxEntry, 2, 1)
self.grid.addWidget(wc.QLabel(u"Centre of Mass:"), 8, 0, 1, 2)
self.grid.addWidget(wc.QLabel("X Unit:"), 9, 0)
self.unitDrop = QtWidgets.QComboBox()
self.unitDrop.addItems(unitList)
self.unitDrop.setCurrentIndex(unitSelect)
self.unitDrop.currentIndexChanged.connect(lambda: self.applyFunc(True))
self.grid.addWidget(self.unitDrop, 9, 1)
self.comEntry = wc.QLineEdit("0.0")
self.grid.addWidget(wc.QLabel(u"X COM:"), 10, 0)
self.grid.addWidget(self.comEntry, 10, 1)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = self.pickDim
def picked(self, pos, num=0):
if num == 0:
self.minEntry.setText(str(pos[0]))
if self.pickDim == 2:
self.minEntryY.setText(str(pos[3]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 1)
self.father.current.peakPick = self.pickDim
elif num == 1:
self.maxEntry.setText(str(pos[0]))
if self.pickDim == 2:
self.maxEntryY.setText(str(pos[3]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 0)
self.father.current.peakPick = self.pickDim
self.applyFunc()
def applyFunc(self, calc=True):
dataLength = self.father.current.len()
inp = safeEval(self.minEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Centre of Mass: invalid range")
minimum = int(round(inp))
if minimum < 0:
minimum = 0
elif minimum > dataLength:
minimum = dataLength
self.minEntry.setText(str(minimum))
inp = safeEval(self.maxEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Centre of Mass: invalid range")
maximum = int(round(inp))
if maximum < 0:
maximum = 0
elif maximum > dataLength:
maximum = dataLength
self.maxEntry.setText(str(maximum))
        # For contour plots also validate the Y-axis range
if self.pickDim == 2:
dataLengthY = self.father.current.len(-2)
inp = safeEval(self.minEntryY.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Centre of Mass: invalid range")
minimumY = int(round(inp))
if minimumY < 0:
minimumY = 0
elif minimumY > dataLengthY:
minimumY = dataLengthY
self.minEntryY.setText(str(minimumY))
inp = safeEval(self.maxEntryY.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Centre of Mass: invalid range")
maximumY = int(round(inp))
if maximumY < 0:
maximumY = 0
elif maximumY > dataLengthY:
maximumY = dataLengthY
self.maxEntryY.setText(str(maximumY))
if calc:
if self.pickDim == 1:
self.comEntry.setText(str(self.father.current.COM([minimum], [maximum], [self.unitDrop.currentIndex()])[0]))
elif self.pickDim == 2:
com = self.father.current.COM([minimum, minimumY], [maximum, maximumY], [self.unitDrop.currentIndex(), self.unitDropY.currentIndex()])
self.comEntry.setText(str(com[0]))
self.comEntryY.setText(str(com[1]))
##########################################################################################
class IntegralsWindow(wc.ToolWindow):
NAME = "Integrals"
CANCELNAME = "&Close"
OKNAME = "C&alc"
APPLYANDCLOSE = False
def __init__(self, parent):
super(IntegralsWindow, self).__init__(parent)
self.pickDim = 1
if isinstance(self.father.current, views.CurrentContour):
self.pickDim = 2
self.grid.addWidget(wc.QLabel("Start index X:"), 0, 0)
self.grid.addWidget(wc.QLabel("End index X:"), 0, 1)
if self.pickDim == 2:
self.grid.addWidget(wc.QLabel("Start index Y:"), 0, 2)
self.grid.addWidget(wc.QLabel("End index Y:"), 0, 3)
self.grid.addWidget(wc.QLabel("Integral:"), 0, 4)
self.scaling = 1
self.num = 0
self.pickType = 0
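        # pickType 0: the next peak pick opens a new integral region; 1: the next pick closes it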
self.minEntries = []
self.maxEntries = []
self.minEntriesY = []
self.maxEntriesY = []
self.intEntries = []
self.intValues = []
self.xValues = []
self.yValues = []
self.datMax = 0
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = self.pickDim
def picked(self, pos):
if self.pickDim == 2:
posY = str(pos[3])
pos = str(pos[0])
if self.pickType == 0:
self.minEntries.append(wc.QLineEdit(pos, self.applyFunc))
self.maxEntries.append(wc.QLineEdit('', self.applyFunc))
self.intEntries.append(wc.QLineEdit('', (lambda n: lambda: self.setScaling(n))(self.num)))
self.intValues.append(None)
self.xValues.append(None)
self.yValues.append(None)
self.intEntries[-1].setMinimumWidth(120)
self.grid.addWidget(self.minEntries[-1], self.num+1, 0)
self.grid.addWidget(self.maxEntries[-1], self.num+1, 1)
self.grid.addWidget(self.intEntries[-1], self.num+1, 4)
if self.pickDim == 2:
self.minEntriesY.append(wc.QLineEdit(posY, self.applyFunc))
self.maxEntriesY.append(wc.QLineEdit('', self.applyFunc))
self.grid.addWidget(self.minEntriesY[-1], self.num+1, 2)
self.grid.addWidget(self.maxEntriesY[-1], self.num+1, 3)
self.pickType = 1
elif self.pickType == 1:
self.maxEntries[-1].setText(pos)
if self.pickDim == 2:
self.maxEntriesY[-1].setText(posY)
self.num += 1
self.applyFunc()
self.pickType = 0
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = self.pickDim
def preview(self):
if self.pickDim == 1:
self.father.current.integralsPreview(self.xValues, self.yValues, self.datMax)
self.father.current.peakPick = True
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
if self.pickDim == 2:
xMin = [int(x.text()) for x in self.minEntries]
xMax = [int(x.text()) for x in self.maxEntries]
yMin = [int(x.text()) for x in self.minEntriesY]
yMax = [int(x.text()) for x in self.maxEntriesY]
self.father.current.integralsPreview(xMin, xMax, yMin, yMax)
def setScaling(self, num):
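        # Rescale all displayed integrals so that the edited entry shows the value just typed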
        inp = safeEval(self.intEntries[num].text(), length=self.father.current.len(), Type='FI')
        if inp is None:
            return
        intValue = self.intValues[num]  # do not shadow the built-in int
        self.scaling = intValue / inp
        self.applyFunc()
def applyFunc(self):
dataLength = [self.father.current.shape()[-1] - 1]
Parts = [[self.minEntries], [self.maxEntries]]
if self.pickDim == 2:
dataLength.append(self.father.current.shape()[-2] - 1)
Parts[0].append(self.minEntriesY)
Parts[1].append(self.maxEntriesY)
for num in range(len(self.minEntries)):
results = [[], []] #The min/max results
ok = []
for place, _ in enumerate(Parts):
for i, part in enumerate(Parts[place]):
inp = safeEval(part[num].text(), length=dataLength, Type='FI')
if inp is None:
part[num].setText('')
ok.append(False)
else:
ok.append(True)
tmp = int(round(inp))
                    tmp = min(max(tmp, 0), dataLength[i])  # clamp to 0 <= value <= dataLength[i]
results[place].append(tmp)
part[num].setText(str(tmp))
if all(ok):
self.intValues[num], self.xValues[num], self.yValues[num], self.datMax = self.father.current.Integrals(*results)
self.intEntries[num].setText('%#.7g' % (self.intValues[num] / self.scaling))
else:
self.intEntries[num].setText('')
self.intValues[num] = None
self.preview()
##########################################################################################
class ReorderWindow(wc.ToolWindow):
NAME = "Reorder"
def __init__(self, parent):
super(ReorderWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Positions of the spectra:"), 0, 0)
self.valEntry = wc.QLineEdit('', self.preview)
self.grid.addWidget(self.valEntry, 1, 0)
fileButton = QtWidgets.QPushButton("&Browse")
fileButton.clicked.connect(self.getPosFromFile)
self.grid.addWidget(fileButton, 2, 0)
self.grid.addWidget(wc.QLabel("Length of dimension:"), 3, 0)
self.lengthEntry = wc.QLineEdit('', self.preview)
self.grid.addWidget(self.lengthEntry, 4, 0)
def preview(self, *args):
pass
def getPosFromFile(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', self.father.father.lastLocation)
if isinstance(filename, tuple):
filename = filename[0]
if filename: # if not cancelled
self.father.father.lastLocation = os.path.dirname(filename) # Save used path
if not filename:
return
self.valEntry.setText(repr(np.loadtxt(filename, dtype=int)))
def applyFunc(self):
newLength = self.lengthEntry.text()
if newLength == '':
newLength = None
else:
newLength = safeEval(self.lengthEntry.text(), length=self.father.current.len(), Type='FI')
if newLength is None:
raise SsnakeException("Reorder: `Length' input is not valid")
val = safeEval(self.valEntry.text(), length=int(self.father.current.len()))
if not isinstance(val, (list, np.ndarray)):
raise SsnakeException("Reorder: `Positions' input is not a list or array")
if len(val) != self.father.current.len():
raise SsnakeException("Reorder: length of input does not match length of data")
val = np.array(val, dtype=int)
check = self.father.current.reorder(val, newLength)
if check is False:
raise SsnakeException("Reorder: error during applying")
##########################################################################################
class RegridWindow(wc.ToolWindow):
NAME = "Regrid"
def __init__(self, parent):
super(RegridWindow, self).__init__(parent)
self.typeDrop = QtWidgets.QComboBox(parent=self)
self.typeDrop.addItems(["Min/max input"])
self.grid.addWidget(self.typeDrop, 0, 0, 1, 2)
# Get unit
if self.father.current.spec() == 1:
if self.father.masterData.shape()[self.father.current.axes[-1]] == 1:
self.closeEvent()
raise SsnakeException("Regrid: Regrid not possible with size 1")
if self.father.current.getppm():
self.unit = 'ppm'
else:
axType = self.father.current.getAxType()
if axType == 0:
self.unit = 'Hz'
elif axType == 1:
self.unit = 'kHz'
elif axType == 2:
self.unit = 'MHz'
elif axType == 3:
self.unit = 'ppm'
maxVal = self.father.current.xax()[-1]
minVal = self.father.current.xax()[0]
if self.unit == 'kHz':
maxVal /= 1e3
minVal /= 1e3
elif self.unit == 'MHz':
maxVal /= 1e6
minVal /= 1e6
elif self.unit == 'ppm':
maxVal /= self.father.masterData.ref[self.father.current.axes[-1]] / 1e6
minVal /= self.father.masterData.ref[self.father.current.axes[-1]] / 1e6
self.maxValue = wc.QLineEdit(maxVal)
self.maxValue.setMinimumWidth(150)
self.maxLabel = wc.QLeftLabel('Max [' + self.unit + ']:')
self.minValue = wc.QLineEdit(minVal)
self.minLabel = wc.QLeftLabel('Min [' + self.unit + ']:')
self.points = wc.QLineEdit(self.father.masterData.shape()[self.father.current.axes[-1]])
self.pointsLabel = wc.QLeftLabel('# of points:')
self.grid.addWidget(self.minValue, 1, 1)
self.grid.addWidget(self.minLabel, 1, 0)
self.grid.addWidget(self.maxValue, 2, 1)
self.grid.addWidget(self.maxLabel, 2, 0)
self.grid.addWidget(self.pointsLabel, 3, 0)
self.grid.addWidget(self.points, 3, 1)
else:
self.closeEvent()
def applyFunc(self):
maxVal = safeEval(self.maxValue.text(), length=self.father.current.len(), Type='FI')
if maxVal is None:
raise SsnakeException("Regrid: 'Max' input not valid")
minVal = safeEval(self.minValue.text(), length=self.father.current.len(), Type='FI')
if minVal is None:
raise SsnakeException("Regrid: 'Min' input not valid")
numPoints = safeEval(self.points.text(), length=self.father.current.len(), Type='FI')
if numPoints is None or numPoints == 1:
raise SsnakeException("Regrid: '# of points' input not valid")
numPoints = int(numPoints)
# Convert to Hz/s
if self.unit == 'kHz':
maxVal *= 1e3
minVal *= 1e3
elif self.unit == 'MHz':
maxVal *= 1e6
minVal *= 1e6
elif self.unit == 'ppm':
maxVal *= self.father.masterData.ref[self.father.current.axes[-1]] / 1e6
minVal *= self.father.masterData.ref[self.father.current.axes[-1]] / 1e6
self.father.current.regrid([minVal, maxVal], numPoints)
##########################################################################################
class FFMWindow(wc.ToolWindow):
NAME = "FFM"
def __init__(self, parent):
super(FFMWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Positions of the spectra:"), 0, 0)
self.valEntry = wc.QLineEdit('', self.preview)
self.grid.addWidget(self.valEntry, 1, 0)
fileButton = QtWidgets.QPushButton("&Browse")
fileButton.clicked.connect(self.getPosFromFile)
self.grid.addWidget(fileButton, 2, 0)
self.grid.addWidget(wc.QLabel("Type of the position list:"), 3, 0)
self.typeDrop = QtWidgets.QComboBox(parent=self)
self.typeDrop.addItems(["Complex", "States/States-TPPI", "TPPI"])
self.grid.addWidget(self.typeDrop, 4, 0)
self.grid.addWidget(wc.QLabel("Reconstruction may take a while"), 5, 0)
def preview(self, *args):
pass
def getPosFromFile(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', self.father.father.lastLocation)
if isinstance(filename, tuple):
filename = filename[0]
if filename: # if not cancelled
self.father.father.lastLocation = os.path.dirname(filename) # Save used path
if not filename:
return
self.valEntry.setText(repr(np.loadtxt(filename, dtype=int)))
def applyFunc(self):
val = safeEval(self.valEntry.text(), length=self.father.current.len())
if not isinstance(val, (list, np.ndarray)):
raise SsnakeException("FFM: 'Positions' is not a list or array")
val = np.array(val, dtype=int)
check = self.father.current.ffm(val, self.typeDrop.currentIndex())
if check is False:
raise SsnakeException("FFM: error")
##########################################################################################
class CLEANWindow(wc.ToolWindow):
NAME = "CLEAN"
def __init__(self, parent):
super(CLEANWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Positions of the spectra:"), 0, 0)
self.valEntry = wc.QLineEdit('', self.preview)
self.grid.addWidget(self.valEntry, 1, 0)
fileButton = QtWidgets.QPushButton("&Browse")
fileButton.clicked.connect(self.getPosFromFile)
self.grid.addWidget(fileButton, 2, 0)
self.grid.addWidget(wc.QLabel("Type of the position list:"), 3, 0)
self.typeDrop = QtWidgets.QComboBox(parent=self)
self.typeDrop.addItems(["Complex", "States/States-TPPI", "TPPI"])
self.grid.addWidget(self.typeDrop, 4, 0)
self.grid.addWidget(wc.QLabel("Gamma:"), 5, 0)
self.gammaEntry = wc.QLineEdit("0.2")
self.grid.addWidget(self.gammaEntry, 6, 0)
self.grid.addWidget(wc.QLabel("Threshold:"), 7, 0)
self.thresholdEntry = wc.QLineEdit("2.0")
self.grid.addWidget(self.thresholdEntry, 8, 0)
self.grid.addWidget(wc.QLabel("Max. iterations:"), 11, 0)
self.maxIterEntry = wc.QLineEdit("2000")
self.grid.addWidget(self.maxIterEntry, 12, 0)
def preview(self, *args):
pass
def getPosFromFile(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', self.father.father.lastLocation)
if isinstance(filename, tuple):
filename = filename[0]
if filename: # if not cancelled
self.father.father.lastLocation = os.path.dirname(filename) # Save used path
if not filename:
return
self.valEntry.setText(repr(np.loadtxt(filename, dtype=int)))
def applyFunc(self):
val = safeEval(self.valEntry.text(), length=self.father.current.len())
if not isinstance(val, (list, np.ndarray)):
raise SsnakeException("CLEAN: 'Positions' is not a list or array")
val = np.array(val, dtype=int)
gamma = safeEval(self.gammaEntry.text(), length=self.father.current.len(), Type='FI')
if gamma is None:
raise SsnakeException("CLEAN: 'Gamma' input is not valid")
threshold = safeEval(self.thresholdEntry.text(), length=self.father.current.len(), Type='FI')
if threshold is None:
raise SsnakeException("CLEAN: 'Threshold' input is not valid")
maxIter = safeEval(self.maxIterEntry.text(), length=self.father.current.len(), Type='FI')
if maxIter is None:
raise SsnakeException("CLEAN: 'Max. iter.' is not valid")
maxIter = int(maxIter)
check = self.father.current.clean(val, self.typeDrop.currentIndex(), gamma, threshold, maxIter)
if check is False:
raise SsnakeException("CLEAN: error")
################################################################
class ISTWindow(wc.ToolWindow):
NAME = "IST"
def __init__(self, parent):
super(ISTWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Positions of the spectra:"), 0, 0)
self.valEntry = wc.QLineEdit('', self.preview)
self.grid.addWidget(self.valEntry, 1, 0)
fileButton = QtWidgets.QPushButton("&Browse")
fileButton.clicked.connect(self.getPosFromFile)
self.grid.addWidget(fileButton, 2, 0)
self.grid.addWidget(wc.QLabel("Type of the position list:"), 3, 0)
self.typeDrop = QtWidgets.QComboBox(parent=self)
self.typeDrop.addItems(["Complex", "States/States-TPPI", "TPPI"])
self.grid.addWidget(self.typeDrop, 4, 0)
self.grid.addWidget(wc.QLabel("Threshold:"), 5, 0)
self.thresholdEntry = wc.QLineEdit("0.9")
self.grid.addWidget(self.thresholdEntry, 6, 0)
self.grid.addWidget(wc.QLabel("Max. iterations:"), 7, 0)
self.maxIterEntry = wc.QLineEdit("100")
self.grid.addWidget(self.maxIterEntry, 8, 0)
self.grid.addWidget(wc.QLabel("Stop when residual below (% of ND max):"), 9, 0)
self.tracelimitEntry = wc.QLineEdit("2.0")
self.grid.addWidget(self.tracelimitEntry, 10, 0)
def preview(self, *args):
pass
def getPosFromFile(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', self.father.father.lastLocation)
if isinstance(filename, tuple):
filename = filename[0]
if filename: # if not cancelled
self.father.father.lastLocation = os.path.dirname(filename) # Save used path
if not filename:
return
self.valEntry.setText(repr(np.loadtxt(filename, dtype=int)))
def applyFunc(self):
val = safeEval(self.valEntry.text(), length=self.father.current.len())
if not isinstance(val, (list, np.ndarray)):
raise SsnakeException("IST: 'Positions' input is not a list or array")
val = np.array(val, dtype=int)
tracelimit = safeEval(self.tracelimitEntry.text(), length=self.father.current.len(), Type='FI')
if tracelimit is None:
raise SsnakeException("IST: 'Residual' input is not valid")
tracelimit /= 100
threshold = safeEval(self.thresholdEntry.text(), length=self.father.current.len(), Type='FI')
if threshold is None:
raise SsnakeException("IST: 'Threshold' input is not valid")
maxIter = safeEval(self.maxIterEntry.text(), length=self.father.current.len(), Type='FI')
if maxIter is None:
raise SsnakeException("IST: 'Max. iter.' input is not valid")
maxIter = int(maxIter)
check = self.father.current.ist(val, self.typeDrop.currentIndex(), threshold, maxIter, tracelimit)
if check is False:
raise SsnakeException("IST: error")
################################################################
class ShearingWindow(wc.ToolWindow):
NAME = "Shearing"
def __init__(self, parent):
super(ShearingWindow, self).__init__(parent)
options = list(map(str, range(1, self.father.masterData.ndim() + 1)))
self.grid.addWidget(wc.QLabel("Shearing constant:"), 0, 0)
self.shearDropdown = QtWidgets.QComboBox()
self.shearDropdown.addItems(['User Defined', 'Spin 3/2, -3Q (7/9)', 'Spin 5/2, 3Q (19/12)', 'Spin 5/2, -5Q (25/12)', 'Spin 7/2, 3Q (101/45)',
'Spin 7/2, 5Q (11/9)', 'Spin 7/2, -7Q (161/45)', 'Spin 9/2, 3Q (91/36)', 'Spin 9/2, 5Q (95/36)', 'Spin 9/2, 7Q (7/18)', 'Spin 9/2, -9Q (31/6)'])
self.shearDropdown.activated.connect(self.dropdownChanged)
self.shearList = [0, 7.0 / 9.0, 19.0 / 12.0, 25.0 / 12.0, 101.0 / 45.0, 11.0 / 9.0, 161.0 / 45.0, 91.0 / 36.0, 95.0 / 36.0, 7.0 / 18.0, 31.0 / 6.0]
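        # Shear constants matching the dropdown entries above (index 0 = user defined)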
self.grid.addWidget(self.shearDropdown, 1, 0)
self.shearEntry = wc.QLineEdit("0.0", self.shearPreview)
self.grid.addWidget(self.shearEntry, 3, 0)
self.grid.addWidget(wc.QLabel("Shearing direction:"), 4, 0)
self.dirEntry = QtWidgets.QComboBox()
self.dirEntry.addItems(options)
self.dirEntry.setCurrentIndex(self.father.masterData.ndim() - 2)
self.grid.addWidget(self.dirEntry, 5, 0)
self.grid.addWidget(wc.QLabel("Shearing axis:"), 6, 0)
self.axEntry = QtWidgets.QComboBox()
self.axEntry.addItems(options)
self.axEntry.setCurrentIndex(self.father.masterData.ndim() - 1)
self.grid.addWidget(self.axEntry, 7, 0)
self.toRefCheck = QtWidgets.QCheckBox("Relative to Reference")
self.grid.addWidget(self.toRefCheck, 8, 0)
def dropdownChanged(self):
index = self.shearDropdown.currentIndex()
self.shearEntry.setText("%.9f" % self.shearList[index])
def shearPreview(self, *args):
shear = safeEval(self.shearEntry.text(), length=self.father.current.len(), Type='FI')
if shear is not None:
self.shearEntry.setText(str(float(shear)))
def applyFunc(self):
shear = safeEval(self.shearEntry.text(), length=self.father.current.len(), Type='FI')
if shear is None:
raise SsnakeException("Shearing: 'constant' not a valid value")
axis = self.dirEntry.currentIndex()
axis2 = self.axEntry.currentIndex()
if axis == axis2:
raise SsnakeException("Shearing: axes cannot be the same for shearing")
self.father.current.shearing(float(shear), axis, axis2, self.toRefCheck.isChecked())
##########################################################################################
class MultiplyWindow(wc.ToolWindow):
NAME = "Multiply"
SINGLESLICE = True
def __init__(self, parent):
super(MultiplyWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Values:"), 0, 0)
self.valEntry = wc.QLineEdit('', self.preview)
self.grid.addWidget(self.valEntry, 1, 0)
def preview(self, *args):
val = safeEval(self.valEntry.text(), length=self.father.current.len())
if val is None:
raise SsnakeException("Multiply: input not valid")
self.father.current.multiplyPreview(np.array(val))
def applyFunc(self):
val = safeEval(self.valEntry.text(), length=self.father.current.len())
if val is None:
raise SsnakeException("Multiply: input not valid")
self.father.current.multiply(np.array(val), self.singleSlice.isChecked())
##########################################################################################
class NormalizeWindow(wc.ToolWindow):
NAME = "Normalize"
SINGLESLICE = True
def __init__(self, parent):
super(NormalizeWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Start index:"), 0, 0)
self.minEntry = wc.QLineEdit("0", self.checkValues)
self.grid.addWidget(self.minEntry, 1, 0)
self.grid.addWidget(wc.QLabel("End index:"), 2, 0)
self.maxEntry = wc.QLineEdit(parent.current.len(), self.checkValues)
self.grid.addWidget(self.maxEntry, 3, 0)
self.grid.addWidget(wc.QLabel("Type:"), 4, 0)
self.typeDrop = QtWidgets.QComboBox()
self.typeDrop.addItems(['Integral', 'Maximum', 'Minimum'])
self.typeDrop.setCurrentIndex(0)
self.typeDrop.currentIndexChanged.connect(self.checkValues)
self.grid.addWidget(self.typeDrop, 5, 0)
self.grid.addWidget(wc.QLabel("Multiplier:"), 6, 0)
self.valEntry = wc.QLineEdit("1.0")
self.grid.addWidget(self.valEntry, 7, 0)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def picked(self, pos, num=0):
if num == 0:
self.minEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 1)
self.father.current.peakPick = True
elif num == 1:
self.maxEntry.setText(str(pos[0]))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos, 0)
self.father.current.peakPick = True
#self.applyFunc()
def checkValues(self, *args):
dataLength = self.father.current.len()
inp = safeEval(self.minEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
return
minimum = int(round(inp))
if minimum < 0:
minimum = 0
elif minimum > dataLength:
minimum = dataLength
self.minEntry.setText(str(minimum))
inp = safeEval(self.maxEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
return
maximum = int(round(inp))
if maximum < 0:
maximum = 0
elif maximum > dataLength:
maximum = dataLength
self.maxEntry.setText(str(maximum))
#self.applyFunc()
def applyFunc(self):
dataLength = self.father.current.len()
inp = safeEval(self.minEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Normalize: invalid range")
minimum = int(round(inp))
if minimum < 0:
minimum = 0
elif minimum > dataLength:
minimum = dataLength
self.minEntry.setText(str(minimum))
inp = safeEval(self.maxEntry.text(), length=self.father.current.len(), Type='FI')
if inp is None:
raise SsnakeException("Normalize: invalid range")
maximum = int(round(inp))
if maximum < 0:
maximum = 0
elif maximum > dataLength:
maximum = dataLength
self.maxEntry.setText(str(maximum))
        scale = safeEval(self.valEntry.text(), length=self.father.current.len(), Type='FI')
        if scale is None:
            raise SsnakeException("Normalize: invalid multiplier")
        normType = self.typeDrop.currentIndex()  # avoid shadowing the built-in type
        if normType == 0:    # Integral
            val, xValues, yValues, datMax = self.father.current.Integrals([minimum], [maximum])
        elif normType == 1:  # Maximum
            val = self.father.current.MaxMin(minimum, maximum, type='max')
        elif normType == 2:  # Minimum
            val = self.father.current.MaxMin(minimum, maximum, type='min')
        self.father.current.normalize(1.0 / val, scale, normType, self.singleSlice.isChecked())
##########################################################################################
class XaxWindow(wc.ToolWindow):
RESIZABLE = True
NAME = "User defined x-axis"
def __init__(self, parent):
super(XaxWindow, self).__init__(parent)
self.axisSize = self.father.current.len()
self.grid.addWidget(wc.QLabel("Input x-axis values:"), 0, 0, 1, 2)
self.typeDropdown = QtWidgets.QComboBox()
self.typeDropdown.addItems(['Expression', 'Linear', 'Logarithmic'])
self.typeDropdown.activated.connect(self.typeChanged)
self.grid.addWidget(self.typeDropdown, 1, 0, 1, 2)
self.exprEntry = wc.QLineEdit('', self.xaxPreview)
self.grid.addWidget(self.exprEntry, 2, 0, 1, 2)
# Linear
self.linStartLabel = wc.QLeftLabel("Start [s]:")
self.linStopLabel = wc.QLeftLabel("Stop [s]:")
self.linStartLabel.hide()
self.linStopLabel.hide()
self.grid.addWidget(self.linStartLabel, 3, 0, 1, 1)
self.grid.addWidget(self.linStopLabel, 4, 0, 1, 1)
self.linStartEntry = wc.QLineEdit('', self.xaxPreview)
self.linStartEntry.setMaximumWidth(120)
self.linStartEntry.hide()
self.grid.addWidget(self.linStartEntry, 3, 1, 1, 1)
self.linStopEntry = wc.QLineEdit('', self.xaxPreview)
self.linStopEntry.setMaximumWidth(120)
self.linStopEntry.hide()
self.grid.addWidget(self.linStopEntry, 4, 1, 1, 1)
# Log
self.logStartLabel = wc.QLeftLabel("Start [s]:")
self.logStopLabel = wc.QLeftLabel("Stop [s]:")
self.logStartLabel.hide()
self.logStopLabel.hide()
self.grid.addWidget(self.logStartLabel, 5, 0, 1, 1)
self.grid.addWidget(self.logStopLabel, 6, 0, 1, 1)
self.logStartEntry = wc.QLineEdit('', self.xaxPreview)
self.logStartEntry.setMaximumWidth(120)
self.logStartEntry.hide()
self.grid.addWidget(self.logStartEntry, 5, 1, 1, 1)
self.logStopEntry = wc.QLineEdit('', self.xaxPreview)
self.logStopEntry.setMaximumWidth(120)
self.logStopEntry.hide()
self.grid.addWidget(self.logStopEntry, 6, 1, 1, 1)
self.table = QtWidgets.QTableWidget(self.axisSize, 2)
self.table.setHorizontalHeaderLabels(['Index', 'Value [s]'])
self.table.verticalHeader().hide()
for val in range(self.axisSize):
item = QtWidgets.QTableWidgetItem(str(val))
item.setFlags(QtCore.Qt.ItemIsEnabled)
self.table.setItem(int(val), 0, item)
item2 = QtWidgets.QTableWidgetItem('')
item2.setFlags(QtCore.Qt.ItemIsEnabled)
self.table.setItem(int(val), 1, item2)
# self.table.setVerticalHeaderLabels([str(a) for a in range(self.axisSize)])
self.grid.addWidget(self.table, 12, 0, 1, 2)
self.resize(250, 500)
def typeChanged(self, index):
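        # Show only the input widgets for the selected x-axis definition
        # (0 = expression, 1 = linear, 2 = logarithmic)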
if index == 0: # If expr
self.exprEntry.show()
self.linStartLabel.hide()
self.linStopLabel.hide()
self.linStartEntry.hide()
self.linStopEntry.hide()
self.logStartLabel.hide()
self.logStopLabel.hide()
self.logStartEntry.hide()
self.logStopEntry.hide()
elif index == 1:
self.exprEntry.hide()
self.linStartLabel.show()
self.linStopLabel.show()
self.linStartEntry.show()
self.linStopEntry.show()
self.logStartLabel.hide()
self.logStopLabel.hide()
self.logStartEntry.hide()
self.logStopEntry.hide()
elif index == 2:
self.exprEntry.hide()
self.linStartLabel.hide()
self.linStopLabel.hide()
self.linStartEntry.hide()
self.linStopEntry.hide()
self.logStartLabel.show()
self.logStopLabel.show()
self.logStartEntry.show()
self.logStopEntry.show()
def getValues(self):
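        # Construct the new x-axis according to the selected mode (expression, linear,
        # or logarithmic); raises SsnakeException on invalid input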
if self.typeDropdown.currentIndex() == 0:
env = vars(np).copy()
            env['length'] = self.father.current.len()  # so 'length' can be used in expressions
env['euro'] = lambda fVal, num=self.axisSize: func.euro(fVal, num)
try:
                val = np.array(eval(self.exprEntry.text(), env), dtype=float)  # find a better solution, also add catch for exceptions
except SyntaxError:
try:
val = np.fromstring(self.exprEntry.text(), sep=' ')
val2 = np.fromstring(self.exprEntry.text(), sep=',')
if len(val2) > len(val):
val = val2
except Exception as e:
raise SsnakeException(str(e))
except Exception as e:
raise SsnakeException(str(e))
if not isinstance(val, (list, np.ndarray)):
raise SsnakeException("X-axis: Input is not a list or array")
if len(val) != self.father.current.len():
raise SsnakeException("X-axis: Length of input does not match length of data")
if not all(isinstance(x, (int, float)) for x in val):
raise SsnakeException("X-axis: Array is not all of int or float type")
elif self.typeDropdown.currentIndex() == 1:
start = safeEval(self.linStartEntry.text(), Type='FI')
stop = safeEval(self.linStopEntry.text(), Type='FI')
if start is None:
raise SsnakeException("X-axis: linear start value is not valid")
if stop is None:
raise SsnakeException("X-axis: linear stop value is not valid")
val = np.linspace(start, stop, self.axisSize)
elif self.typeDropdown.currentIndex() == 2:
start = safeEval(self.logStartEntry.text(), Type='FI')
stop = safeEval(self.logStopEntry.text(), Type='FI')
if start is None or start <= 0.0:
raise SsnakeException("X-axis: logarithmic start value is not valid")
if stop is None or stop <= 0.0:
raise SsnakeException("X-axis: logarithmic stop value is not valid")
val = np.logspace(np.log10(start), np.log10(stop), self.axisSize)
return val
def xaxPreview(self, *args):
val = self.getValues()
if val is None: # if error return. Messages are handled by the called function
return
for i in range(self.axisSize):
item = QtWidgets.QTableWidgetItem('{:.6g}'.format(val[i]))
item.setFlags(QtCore.Qt.ItemIsEnabled)
self.table.setItem(i, 1, item)
self.father.current.setXaxPreview(np.array(val))
def applyFunc(self):
val = self.getValues()
if val is None: # if error return. Messages are handled by the called function
return
self.father.current.setXax(np.array(val))
##########################################################################################
class RefWindow(wc.ToolWindow):
NAME = "Reference"
def __init__(self, parent):
super(RefWindow, self).__init__(parent)
# Secondary reference definitions
file = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + "References.txt"
with open(file) as refFile:
refList = [line.strip().split('\t') for line in refFile]
secRefNames = ["User Defined"]
secRefValues = ["0.0"]
for entry in refList:
secRefNames.append(entry[0])
secRefValues.append(entry[1])
self.secRefNames = secRefNames
self.secRefValues = secRefValues
if parent.current.spec() == 0:
self.closeEvent()
raise SsnakeException('Setting ppm is only available for frequency data')
self.grid.addWidget(wc.QLabel("Name:"), 0, 0)
self.refName = wc.QLineEdit()
self.grid.addWidget(self.refName, 1, 0)
self.grid.addWidget(wc.QLabel("Frequency [MHz]:"), 2, 0)
self.freqEntry = wc.QLineEdit(("%.7f" % (self.father.current.ref() * 1e-6)), self.preview)
self.grid.addWidget(self.freqEntry, 3, 0)
self.grid.addWidget(wc.QLabel("Secondary Reference:"), 4, 0)
self.refSecond = QtWidgets.QComboBox(parent=self)
self.refSecond.addItems(self.secRefNames)
self.refSecond.activated.connect(self.fillSecondaryRef)
self.grid.addWidget(self.refSecond, 5, 0)
self.grid.addWidget(wc.QLabel("Reference [ppm]:"), 6, 0)
self.refEntry = wc.QLineEdit("0.0", self.preview)
self.grid.addWidget(self.refEntry, 7, 0)
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
def preview(self, *args):
freq = safeEval(self.freqEntry.text(), length=self.father.current.len(), Type='FI')
ref = safeEval(self.refEntry.text(), length=self.father.current.len(), Type='FI')
if freq is None or ref is None:
return
self.freqEntry.setText("%.7f" % (freq))
self.refEntry.setText(str(ref))
def fillSecondaryRef(self):
self.refEntry.setText(self.secRefValues[self.refSecond.currentIndex()])
def applyAndClose(self):
self.father.current.peakPickReset()
freq = safeEval(self.freqEntry.text(), length=self.father.current.len(), Type='FI')
ref = safeEval(self.refEntry.text(), length=self.father.current.len(), Type='FI')
if freq is None or ref is None:
raise SsnakeException("Not a valid value")
freq = freq * 1e6
reffreq = freq / (1.0 + ref * 1e-6)
givenname = self.refName.text()
nameOK = True
if givenname: # If name is filled in
if givenname in self.father.father.referenceName: # if exists
self.father.father.dispMsg("Reference name '" + givenname + "' already exists")
nameOK = False
else:
self.father.father.referenceAdd(reffreq, givenname)
if nameOK:
self.father.current.setRef(reffreq)
self.father.bottomframe.upd()
self.closeEvent()
def picked(self, pos):
self.freqEntry.setText("%.7f" % ((self.father.current.ref() + self.father.current.xax()[pos[0]]) * 1e-6))
self.father.current.peakPickFunc = lambda pos, self=self: self.picked(pos)
self.father.current.peakPick = True
##########################################################################################
class HistoryWindow(wc.ToolWindow):
NAME = "Processing history"
RESIZABLE = True
MENUDISABLE = True
def __init__(self, parent):
super(HistoryWindow, self).__init__(parent)
self.cancelButton.hide()
self.valEntry = QtWidgets.QTextEdit()
self.valEntry.setReadOnly(True)
self.valEntry.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
self.valEntry.setText(self.father.masterData.getHistory())
self.grid.addWidget(self.valEntry, 1, 0)
self.resize(550, 700)
#########################################################################################
class OrigListWidget(QtWidgets.QListWidget):
def __init__(self, parent=None, dest=None):
super(OrigListWidget, self).__init__(parent)
self.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setAcceptDrops(True)
self.dest = dest
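        # 'dest' is the paired destination list: double-clicking copies the selected
        # items there, and items dropped back onto this list are removed from it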
def dropEvent(self, event):
if event.source() == self:
pass
else:
if self.dest is not None:
for item in self.dest.selectedItems():
self.dest.takeItem(self.dest.row(item))
def mouseDoubleClickEvent(self, event):
for item in self.selectedItems():
QtWidgets.QListWidgetItem(item.text(), self.dest)
#########################################################################################
class DestListWidget(QtWidgets.QListWidget):
def __init__(self, parent=None):
super(DestListWidget, self).__init__(parent)
self.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setAcceptDrops(True)
def dropEvent(self, event):
if event.source() == self:
event.setDropAction(QtCore.Qt.MoveAction)
super(DestListWidget, self).dropEvent(event)
else:
event.setDropAction(QtCore.Qt.CopyAction)
super(DestListWidget, self).dropEvent(event)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Delete:
self.deleteSelected()
def deleteSelected(self):
for item in self.selectedItems():
self.takeItem(self.row(item))
    def moveSelection(self, direction='up'):
        # Get selected items
        index = [self.row(item) for item in self.selectedItems()]
        items = [item for item in self.selectedItems()]
        # Sort items based on index, to get the move order right
        items = [x for _, x in sorted(zip(index, items))]
        if direction == 'up':
            check = 0
            step = -1
        elif direction == 'down':
            check = self.count() - 1
            step = +1
            items = items[::-1]  # Invert move order
        if check in index:  # If one item is already at the limit
            return
        # If not, move every item one line
        for item in items:
            row = self.row(item)
            currentItem = self.takeItem(row)
            self.insertItem(row + step, currentItem)
        # Reselect the items
        for item in items:
            item.setSelected(True)
def mouseDoubleClickEvent(self, event):
self.deleteSelected()
##########################################################################################
class CombineWorkspaceWindow(wc.ToolWindow):
NAME = "Combine workspaces"
RESIZABLE = True
def __init__(self, parent):
super(CombineWorkspaceWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Workspaces:"), 0, 0)
self.grid.addWidget(wc.QLabel("Combined spectrum:"), 0, 2)
self.listB = DestListWidget(self)
self.listA = OrigListWidget(self, self.listB)
for i in self.father.workspaceNames:
QtWidgets.QListWidgetItem(i, self.listA).setToolTip(i)
self.grid.addWidget(self.listA, 1, 0, 2, 1)
self.grid.addWidget(self.listB, 1, 2, 2, 1)
self.rightPush = QtWidgets.QPushButton(u"\u2192", self)
self.leftPush = QtWidgets.QPushButton(u"\u2190", self)
self.downPush = QtWidgets.QPushButton(u"\u2193", self)
self.upPush = QtWidgets.QPushButton(u"\u2191", self)
self.rightPush.setSizePolicy(
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding)
self.leftPush.setSizePolicy(
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding)
self.downPush.setSizePolicy(
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding)
self.upPush.setSizePolicy(
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding)
self.grid.addWidget(self.rightPush, 1, 1)
self.grid.addWidget(self.leftPush, 2, 1)
self.grid.addWidget(self.upPush, 1, 3)
self.grid.addWidget(self.downPush, 2, 3)
self.leftPush.clicked.connect(self.right2left)
self.rightPush.clicked.connect(self.left2right)
self.upPush.clicked.connect(self.moveUp)
self.downPush.clicked.connect(self.moveDown)
self.resize(500, 400)
def right2left(self):
self.listB.deleteSelected()
def left2right(self):
for item in self.listA.selectedItems():
self.listB.addItem(item.text())
def moveUp(self):
self.listB.moveSelection('up')
def moveDown(self):
self.listB.moveSelection('down')
def applyFunc(self, *args):
items = []
for index in range(self.listB.count()):
items.append(self.listB.item(index).text())
if not items:
raise SsnakeException("Please select at least one workspace to combine")
self.father.combineWorkspace(items)
def closeEvent(self, *args):
if self.MENUDISABLE:
self.father.menuEnable(True)
self.deleteLater()
##########################################################################################
class CombineLoadWindow(wc.ToolWindow):
NAME = "Open & Combine"
BROWSE = True
RESIZABLE = True
MENUDISABLE = False
def __init__(self, parent):
super(CombineLoadWindow, self).__init__(parent)
self.setAcceptDrops(True)
self.grid.addWidget(wc.QLabel("Data to be Combined:"), 0, 0)
self.specList = DestListWidget(self)
self.grid.addWidget(self.specList, 1, 0)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls:
event.accept()
else:
event.ignore()
def dropEvent(self, event):
for url in event.mimeData().urls():
name = url.toLocalFile()
self.specList.addItem(name)
def browse(self):
fileList = QtWidgets.QFileDialog.getOpenFileNames(self, 'Open File', self.father.lastLocation)
if isinstance(fileList, tuple):
fileList = fileList[0]
for filePath in fileList:
if filePath: # if not cancelled
self.father.lastLocation = os.path.dirname(filePath) # Save used path
if not filePath:
return
self.specList.addItem(filePath)
def applyFunc(self, *args):
items = []
for index in range(self.specList.count()):
items.append(self.specList.item(index).text())
if not items:
raise SsnakeException("Please select at least one workspace to combine")
self.father.loadAndCombine(items)
def closeEvent(self, *args):
self.deleteLater()
##########################################################################################
class MonitorWindow(QtWidgets.QWidget):
def __init__(self, parent):
super(MonitorWindow, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.Tool)
self.father = parent
self.setWindowTitle("Monitor")
layout = QtWidgets.QGridLayout(self)
grid = QtWidgets.QGridLayout()
fileName = self.father.masterData.filePath[0][0]
if len(fileName) > 58:
fileName = fileName[:55] + '...'
fileLabel = wc.QLabel("File: " + fileName)
fileLabel.setToolTip(self.father.masterData.filePath[0][0])
layout.addWidget(fileLabel, 0, 0, 1, 3)
layout.addLayout(grid, 1, 0, 1, 3)
grid.addWidget(wc.QLabel("Macros:"), 0, 0)
grid.addWidget(wc.QLabel("Apply after loading:"), 0, 1)
self.listB = DestListWidget(self)
for i in self.father.monitorMacros:
QtWidgets.QListWidgetItem(i, self.listB).setToolTip(i)
self.listA = OrigListWidget(self, self.listB)
for i in self.father.father.macros.keys():
QtWidgets.QListWidgetItem(i, self.listA).setToolTip(i)
grid.addWidget(self.listA, 1, 0)
grid.addWidget(self.listB, 1, 1)
grid.addWidget(wc.QLabel("Delay [s]:"), 2, 0)
self.delTime = wc.SsnakeDoubleSpinBox()
self.delTime.setMaximum(10000)
self.delTime.setMinimum(0)
self.delTime.setSingleStep(0.1)
self.delTime.setValue(0.5)
grid.addWidget(self.delTime, 2, 1)
cancelButton = QtWidgets.QPushButton("&Close")
cancelButton.clicked.connect(self.closeEvent)
watchButton = QtWidgets.QPushButton("&Watch")
watchButton.clicked.connect(self.applyAndClose)
unwatchButton = QtWidgets.QPushButton("&Unwatch")
unwatchButton.clicked.connect(self.stopAndClose)
box = QtWidgets.QDialogButtonBox()
box.addButton(cancelButton, QtWidgets.QDialogButtonBox.RejectRole)
box.addButton(watchButton, QtWidgets.QDialogButtonBox.ActionRole)
box.addButton(unwatchButton, QtWidgets.QDialogButtonBox.ActionRole)
layout.addWidget(box, 3, 0)
layout.setColumnStretch(4, 1)
self.show()
self.setFixedSize(self.size())
self.father.menuEnable(False)
self.setGeometry(self.frameSize().width() - self.geometry().width(), self.frameSize().height() - self.geometry().height(), 0, 0)
def applyAndClose(self, *args):
self.father.stopMonitor()
items = []
for index in range(self.listB.count()):
items.append(self.listB.item(index).text())
delay = self.delTime.value()
self.father.startMonitor(items, delay)
self.closeEvent()
def stopAndClose(self, *args):
self.father.stopMonitor()
self.closeEvent()
def closeEvent(self, *args):
self.father.menuEnable(True)
self.deleteLater()
##############################################################################
class PlotSettingsWindow(wc.ToolWindow):
NAME = "Preferences"
def __init__(self, parent):
super(PlotSettingsWindow, self).__init__(parent)
tabWidget = QtWidgets.QTabWidget()
tab1 = QtWidgets.QWidget()
tab2 = QtWidgets.QWidget()
tab3 = QtWidgets.QWidget()
tabWidget.addTab(tab1, "Plot")
tabWidget.addTab(tab2, "Contour")
tabWidget.addTab(tab3, "2D Colour")
grid1 = QtWidgets.QGridLayout()
grid2 = QtWidgets.QGridLayout()
grid3 = QtWidgets.QGridLayout()
tab1.setLayout(grid1)
tab2.setLayout(grid2)
tab3.setLayout(grid3)
grid1.setColumnStretch(10, 1)
grid1.setRowStretch(10, 1)
grid2.setColumnStretch(10, 1)
grid2.setRowStretch(10, 1)
grid3.setColumnStretch(10, 1)
grid3.setRowStretch(10, 1)
grid1.addWidget(QtWidgets.QLabel("Linewidth:"), 1, 0)
self.lwSpinBox = wc.SsnakeDoubleSpinBox()
self.lwSpinBox.setSingleStep(0.1)
self.lwSpinBox.setValue(self.father.current.viewSettings["linewidth"])
self.lwSpinBox.valueChanged.connect(self.preview)
grid1.addWidget(self.lwSpinBox, 1, 1)
self.color = self.father.current.viewSettings["color"]
lineColorButton = QtWidgets.QPushButton("Line colour")
lineColorButton.clicked.connect(self.setColor)
grid1.addWidget(lineColorButton, 2, 0)
grid1.addWidget(QtWidgets.QLabel("Colour range:"), 3, 0)
self.crEntry = QtWidgets.QComboBox(self)
self.crEntry.addItems(views.COLORRANGELIST)
self.crEntry.setCurrentIndex(self.father.current.getColorRange())
self.crEntry.currentIndexChanged.connect(self.preview)
grid1.addWidget(self.crEntry, 3, 1)
self.xgridCheck = QtWidgets.QCheckBox("x-grid")
self.xgridCheck.setChecked(self.father.current.viewSettings["grids"][0])
self.xgridCheck.stateChanged.connect(self.preview)
grid1.addWidget(self.xgridCheck, 4, 0, 1, 2)
self.ygridCheck = QtWidgets.QCheckBox("y-grid")
self.ygridCheck.setChecked(self.father.current.viewSettings["grids"][1])
grid1.addWidget(self.ygridCheck, 5, 0, 1, 2)
self.ygridCheck.stateChanged.connect(self.preview)
grid1.addWidget(QtWidgets.QLabel("Min X Ticks:"), 6, 0)
self.xTicksSpinBox = wc.SsnakeSpinBox()
self.xTicksSpinBox.setValue(self.father.current.viewSettings["minXTicks"])
self.xTicksSpinBox.valueChanged.connect(self.preview)
grid1.addWidget(self.xTicksSpinBox, 6, 1)
grid1.addWidget(QtWidgets.QLabel("Min Y Ticks:"), 7, 0)
self.yTicksSpinBox = wc.SsnakeSpinBox()
self.yTicksSpinBox.setValue(self.father.current.viewSettings["minYTicks"])
self.yTicksSpinBox.valueChanged.connect(self.preview)
grid1.addWidget(self.yTicksSpinBox, 7, 1)
grid2.addWidget(QtWidgets.QLabel("Colourmap:"), 0, 0)
self.cmEntry = QtWidgets.QComboBox(self)
self.cmEntry.addItems(views.COLORMAPLIST)
self.cmEntry.setCurrentIndex(self.father.current.getColorMap())
self.cmEntry.currentIndexChanged.connect(self.preview)
grid2.addWidget(self.cmEntry, 0, 1)
self.constColorCheck = QtWidgets.QCheckBox("Constant colours")
self.constColorCheck.setChecked(self.father.current.viewSettings["contourConst"])
grid2.addWidget(self.constColorCheck, 1, 0)
self.constColorCheck.stateChanged.connect(self.preview)
self.posColor = self.father.current.viewSettings["contourColors"][0]
posColorButton = QtWidgets.QPushButton("Positive colour")
posColorButton.clicked.connect(self.setPosColor)
grid2.addWidget(posColorButton, 2, 0)
self.negColor = self.father.current.viewSettings["contourColors"][1]
negColorButton = QtWidgets.QPushButton("Negative colour")
negColorButton.clicked.connect(self.setNegColor)
grid2.addWidget(negColorButton, 3, 0)
grid3.addWidget(QtWidgets.QLabel("Colourmap:"), 0, 0)
self.cmEntry2D = QtWidgets.QComboBox(self)
self.cmEntry2D.addItems(views.COLORMAPLIST)
self.cmEntry2D.setCurrentIndex(self.father.current.getPColorMap())
self.cmEntry2D.currentIndexChanged.connect(self.preview)
grid3.addWidget(self.cmEntry2D, 0, 1)
self.grid.addWidget(tabWidget, 0, 0)
def preview(self, *args):
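# Preview pattern: temporarily apply the selected settings and redraw the
# figure, then restore the previously stored values, so closing the window
# without applying leaves the saved settings unchanged.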
tmpLw = self.father.current.viewSettings["linewidth"]
self.father.current.setLw(self.lwSpinBox.value())
tmpXTicks = self.father.current.viewSettings["minXTicks"]
tmpYTicks = self.father.current.viewSettings["minYTicks"]
self.father.current.setTickNum(self.xTicksSpinBox.value(), self.yTicksSpinBox.value())
tmpColor = self.father.current.viewSettings["color"]
self.father.current.setColor(self.color)
tmpColorRange = self.father.current.getColorRange()
self.father.current.setColorRange(self.crEntry.currentIndex())
tmpColorMap = self.father.current.getColorMap()
self.father.current.setColorMap(self.cmEntry.currentIndex())
tmpGrids = self.father.current.viewSettings["grids"]
self.father.current.setGrids([self.xgridCheck.isChecked(), self.ygridCheck.isChecked()])
tmpContourConst = self.father.current.viewSettings["contourConst"]
self.father.current.setContourConst(self.constColorCheck.isChecked())
tmpContourColors = self.father.current.viewSettings["contourColors"]
tmpColorMap2D = self.father.current.getPColorMap()
self.father.current.setPColorMap(self.cmEntry2D.currentIndex())
self.father.current.setContourColors([self.posColor, self.negColor])
self.father.current.showFid()
self.father.current.setLw(tmpLw)
self.father.current.setTickNum(tmpXTicks, tmpYTicks)
self.father.current.setColor(tmpColor)
self.father.current.setColorRange(tmpColorRange)
self.father.current.setColorMap(tmpColorMap)
self.father.current.setGrids(tmpGrids)
self.father.current.setContourConst(tmpContourConst)
self.father.current.setContourColors(tmpContourColors)
self.father.current.setPColorMap(tmpColorMap2D)
def setColor(self, *args):
tmp = QtWidgets.QColorDialog.getColor(QtGui.QColor(self.color))
if tmp.isValid():
self.color = tmp.name()
self.preview()
def setPosColor(self, *args):
tmp = QtWidgets.QColorDialog.getColor(QtGui.QColor(self.posColor))
if tmp.isValid():
self.posColor = tmp.name()
self.preview()
def setNegColor(self, *args):
tmp = QtWidgets.QColorDialog.getColor(QtGui.QColor(self.negColor))
if tmp.isValid():
self.negColor = tmp.name()
self.preview()
def applyFunc(self, *args):
self.father.current.setColor(self.color)
self.father.current.setLw(self.lwSpinBox.value())
self.father.current.setTickNum(self.xTicksSpinBox.value(), self.yTicksSpinBox.value())
self.father.current.setGrids([self.xgridCheck.isChecked(), self.ygridCheck.isChecked()])
self.father.current.setColorRange(self.crEntry.currentIndex())
self.father.current.setColorMap(self.cmEntry.currentIndex())
self.father.current.setContourConst(self.constColorCheck.isChecked())
self.father.current.setContourColors([self.posColor, self.negColor])
self.father.current.setPColorMap(self.cmEntry2D.currentIndex())
##############################################################################
class errorWindow(wc.ToolWindow):
NAME = "Error Messages"
RESIZABLE = True
MENUDISABLE = False
def __init__(self, parent):
super(errorWindow, self).__init__(parent)
self.cancelButton.hide()
self.errorQList = QtWidgets.QListWidget(self)
self.errorQList.currentRowChanged.connect(self.rowChange)
for error in self.father.errors:
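# A payload (error[1]) of length 3 holds exception info (type, value,
# traceback) and is listed in red; a length-1 payload is a plain message string.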
if len(error[1]) == 3:
tmp = QtWidgets.QListWidgetItem(error[0] + ': Program error. Please report.', self.errorQList)
tmp.setForeground(QtGui.QBrush(QtGui.QColor('red')))
elif len(error[1]) == 1:
QtWidgets.QListWidgetItem(error[0] + ': ' + error[1][0], self.errorQList)
self.errorEdit = QtWidgets.QTextEdit(self)
self.errorEdit.setReadOnly(True)
errorText = ''
self.errorEdit.setHtml(errorText)
self.grid.addWidget(self.errorQList, 0, 0, 1, 3)
self.grid.addWidget(self.errorEdit, 1, 0, 1, 3)
self.resize(550, 700)
def rowChange(self, row):
errorText = ''
error = self.father.errors[row]
if len(error[1]) == 3:
errorText = errorText + error[0] + '<br>'
for line in tb.format_exception(error[1][0], error[1][1], error[1][2]):
errorText = errorText + line + '<br>'
self.errorEdit.setHtml(errorText)
def closeEvent(self, *args):
self.deleteLater()
##############################################################################
class PreferenceWindow(QtWidgets.QWidget):
def __init__(self, parent):
super(PreferenceWindow, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.Tool)
self.father = parent
self.setWindowTitle("Preferences")
tabWidget = QtWidgets.QTabWidget()
tab1 = QtWidgets.QWidget()
tab2 = QtWidgets.QWidget()
tab3 = QtWidgets.QWidget()
tab4 = QtWidgets.QWidget()
tabWidget.addTab(tab1, "Window")
tabWidget.addTab(tab2, "Plot")
tabWidget.addTab(tab3, "Contour")
tabWidget.addTab(tab4, "2D Colour")
grid1 = QtWidgets.QGridLayout()
grid2 = QtWidgets.QGridLayout()
grid3 = QtWidgets.QGridLayout()
grid4 = QtWidgets.QGridLayout()
tab1.setLayout(grid1)
tab2.setLayout(grid2)
tab3.setLayout(grid3)
tab4.setLayout(grid4)
grid1.setColumnStretch(10, 1)
grid1.setRowStretch(10, 1)
grid2.setColumnStretch(10, 1)
grid2.setRowStretch(10, 1)
grid3.setColumnStretch(10, 1)
grid3.setRowStretch(10, 1)
grid4.setColumnStretch(10, 1)
grid4.setRowStretch(10, 1)
# grid1.addWidget(wc.QLabel("Window size:"), 0, 0, 1, 2)
grid1.addWidget(wc.QLabel("Width:"), 1, 0)
self.widthSpinBox = wc.SsnakeSpinBox()
self.widthSpinBox.setMaximum(100000)
self.widthSpinBox.setMinimum(1)
self.widthSpinBox.setValue(self.father.defaultWidth)
grid1.addWidget(self.widthSpinBox, 1, 1)
grid1.addWidget(wc.QLabel("Height:"), 2, 0)
self.heightSpinBox = wc.SsnakeSpinBox()
self.heightSpinBox.setMaximum(100000)
self.heightSpinBox.setMinimum(1)
self.heightSpinBox.setValue(self.father.defaultHeight)
grid1.addWidget(self.heightSpinBox, 2, 1)
self.maximizedCheck = QtWidgets.QCheckBox("Open maximized")
self.maximizedCheck.setChecked(self.father.defaultMaximized)
grid1.addWidget(self.maximizedCheck, 3, 0, 1, 2)
self.askNameCheck = QtWidgets.QCheckBox("Ask workspace name when loading")
self.askNameCheck.setChecked(self.father.defaultAskName)
grid1.addWidget(self.askNameCheck, 4, 0, 1, 2)
self.toolbarCheck = QtWidgets.QCheckBox("Show Shortcut Toolbar")
self.toolbarCheck.setChecked(self.father.defaultToolBar)
grid1.addWidget(self.toolbarCheck, 5, 0, 1, 2)
self.tooltipCheck = QtWidgets.QCheckBox("Show Tooltips")
self.tooltipCheck.setChecked(self.father.defaultTooltips)
grid1.addWidget(self.tooltipCheck, 7, 0, 1, 2)
editToolbarButton = QtWidgets.QPushButton("Edit Toolbar")
editToolbarButton.clicked.connect(lambda: ToolbarWindow(self))
grid1.addWidget(editToolbarButton, 6, 0, 1, 2)
self.currentToolbar = self.father.defaultToolbarActionList
self.startupgroupbox = QtWidgets.QGroupBox("Startup Directory")
self.startupgroupbox.setCheckable(True)
self.startupgroupbox.setChecked(self.father.defaultStartupBool)
grid1.addWidget(self.startupgroupbox, 8, 0, 1, 2)
startupgrid = QtWidgets.QGridLayout()
self.startupgroupbox.setLayout(startupgrid)
self.startupDirEntry = QtWidgets.QLineEdit(self)
self.startupDirEntry.setText(self.father.defaultStartupDir)
startupgrid.addWidget(self.startupDirEntry, 0, 0)
self.startupDirButton = QtWidgets.QPushButton("Browse", self)
self.startupDirButton.clicked.connect(self.browseStartup)
startupgrid.addWidget(self.startupDirButton, 0, 1)
# grid2 definitions
grid2.addWidget(QtWidgets.QLabel("Linewidth:"), 1, 0)
self.lwSpinBox = wc.SsnakeDoubleSpinBox()
self.lwSpinBox.setSingleStep(0.1)
self.lwSpinBox.setValue(self.father.defaultLinewidth)
grid2.addWidget(self.lwSpinBox, 1, 1)
self.color = self.father.defaultColor
lineColorButton = QtWidgets.QPushButton("Line colour")
lineColorButton.clicked.connect(self.setColor)
grid2.addWidget(lineColorButton, 2, 0)
grid2.addWidget(QtWidgets.QLabel("Colour range:"), 3, 0)
self.crEntry = QtWidgets.QComboBox(self)
self.crEntry.addItems(views.COLORRANGELIST)
self.crEntry.setCurrentIndex(views.COLORRANGELIST.index(self.father.defaultColorRange))
grid2.addWidget(self.crEntry, 3, 1)
self.xgridCheck = QtWidgets.QCheckBox("x-grid")
self.xgridCheck.setChecked(self.father.defaultGrids[0])
grid2.addWidget(self.xgridCheck, 4, 0, 1, 2)
self.ygridCheck = QtWidgets.QCheckBox("y-grid")
self.ygridCheck.setChecked(self.father.defaultGrids[1])
grid2.addWidget(self.ygridCheck, 5, 0, 1, 2)
grid2.addWidget(QtWidgets.QLabel("Min X Ticks:"), 6, 0)
self.xTicksSpinBox = wc.SsnakeSpinBox()
self.xTicksSpinBox.setValue(self.father.defaultMinXTicks)
grid2.addWidget(self.xTicksSpinBox, 6, 1)
grid2.addWidget(QtWidgets.QLabel("Min Y Ticks:"), 7, 0)
self.yTicksSpinBox = wc.SsnakeSpinBox()
self.yTicksSpinBox.setValue(self.father.defaultMinYTicks)
grid2.addWidget(self.yTicksSpinBox, 7, 1)
grid2.addWidget(QtWidgets.QLabel("Units:"), 8, 0)
self.unitGroup = QtWidgets.QButtonGroup()
button = QtWidgets.QRadioButton("s/Hz")
self.unitGroup.addButton(button, 0)
grid2.addWidget(button, 9, 1)
button = QtWidgets.QRadioButton("ms/kHz")
self.unitGroup.addButton(button, 1)
grid2.addWidget(button, 10, 1)
button = QtWidgets.QRadioButton(u"μs/MHz")
self.unitGroup.addButton(button, 2)
grid2.addWidget(button, 11, 1)
self.unitGroup.button(self.father.defaultUnits).setChecked(True)
self.ppmCheck = QtWidgets.QCheckBox("ppm")
self.ppmCheck.setChecked(self.father.defaultPPM)
grid2.addWidget(self.ppmCheck, 12, 1)
self.zeroScrollCheck = QtWidgets.QCheckBox("Scroll y-axis from zero")
self.zeroScrollCheck.setChecked(self.father.defaultZeroScroll)
grid2.addWidget(self.zeroScrollCheck, 13, 0, 1, 2)
grid2.addWidget(QtWidgets.QLabel("Zoom step:"), 14, 0)
self.ZoomStepSpinBox = wc.SsnakeDoubleSpinBox()
self.ZoomStepSpinBox.setSingleStep(0.1)
self.ZoomStepSpinBox.setValue(self.father.defaultZoomStep)
grid2.addWidget(self.ZoomStepSpinBox, 14, 1)
self.showTitleCheck = QtWidgets.QCheckBox("Show title in plot")
self.showTitleCheck.setChecked(self.father.defaultShowTitle)
grid2.addWidget(self.showTitleCheck, 15, 0, 1, 2)
# grid3 definitions
grid3.addWidget(QtWidgets.QLabel("Colourmap:"), 0, 0)
self.cmEntry = QtWidgets.QComboBox(self)
self.cmEntry.addItems(views.COLORMAPLIST)
self.cmEntry.setCurrentIndex(views.COLORMAPLIST.index(self.father.defaultColorMap))
grid3.addWidget(self.cmEntry, 0, 1)
self.constColorCheck = QtWidgets.QCheckBox("Constant colours")
self.constColorCheck.setChecked(self.father.defaultContourConst)
grid3.addWidget(self.constColorCheck, 1, 0)
self.posColor = self.father.defaultPosColor
posColorButton = QtWidgets.QPushButton("Positive colour")
posColorButton.clicked.connect(self.setPosColor)
grid3.addWidget(posColorButton, 2, 0)
self.negColor = self.father.defaultNegColor
negColorButton = QtWidgets.QPushButton("Negative colour")
negColorButton.clicked.connect(self.setNegColor)
grid3.addWidget(negColorButton, 3, 0)
grid3.addWidget(QtWidgets.QLabel("Width ratio:"), 4, 0)
self.WRSpinBox = wc.SsnakeDoubleSpinBox()
self.WRSpinBox.setSingleStep(0.1)
self.WRSpinBox.setValue(self.father.defaultWidthRatio)
grid3.addWidget(self.WRSpinBox, 4, 1)
grid3.addWidget(QtWidgets.QLabel("Height ratio:"), 5, 0)
self.HRSpinBox = wc.SsnakeDoubleSpinBox()
self.HRSpinBox.setSingleStep(0.1)
self.HRSpinBox.setValue(self.father.defaultHeightRatio)
grid3.addWidget(self.HRSpinBox, 5, 1)
# 2D Colour defs
grid4.addWidget(QtWidgets.QLabel("Colourmap:"), 0, 0)
self.cmEntry2D = QtWidgets.QComboBox(self)
self.cmEntry2D.addItems(views.COLORMAPLIST)
self.cmEntry2D.setCurrentIndex(views.COLORMAPLIST.index(self.father.defaultPColorMap))
grid4.addWidget(self.cmEntry2D, 0, 1)
# Others
layout = QtWidgets.QGridLayout(self)
layout.addWidget(tabWidget, 0, 0, 1, 4)
cancelButton = QtWidgets.QPushButton("&Cancel")
cancelButton.clicked.connect(self.closeEvent)
okButton = QtWidgets.QPushButton("&Store")
okButton.clicked.connect(self.applyAndClose)
resetButton = QtWidgets.QPushButton("&Reset")
resetButton.clicked.connect(self.reset)
box = QtWidgets.QDialogButtonBox()
box.addButton(cancelButton, QtWidgets.QDialogButtonBox.RejectRole)
box.addButton(okButton, QtWidgets.QDialogButtonBox.ActionRole)
box.addButton(resetButton, QtWidgets.QDialogButtonBox.ActionRole)
layout.addWidget(box, 1,0)
layout.setColumnStretch(3, 1)
self.show()
def browseStartup(self, *args):
newDir = QtWidgets.QFileDialog.getExistingDirectory(self, 'Select Directory', self.father.lastLocation, QtWidgets.QFileDialog.ShowDirsOnly)
if newDir:
self.startupDirEntry.setText(newDir)
def setColor(self, *args):
tmp = QtWidgets.QColorDialog.getColor(QtGui.QColor(self.color))
if tmp.isValid():
self.color = tmp.name()
def setPosColor(self, *args):
tmp = QtWidgets.QColorDialog.getColor(QtGui.QColor(self.posColor))
if tmp.isValid():
self.posColor = tmp.name()
def setNegColor(self, *args):
tmp = QtWidgets.QColorDialog.getColor(QtGui.QColor(self.negColor))
if tmp.isValid():
self.negColor = tmp.name()
def applyAndClose(self, *args):
self.father.defaultUnits = self.unitGroup.checkedId()
self.father.defaultPPM = self.ppmCheck.isChecked()
self.father.defaultWidth = self.widthSpinBox.value()
self.father.defaultHeight = self.heightSpinBox.value()
self.father.defaultMaximized = self.maximizedCheck.isChecked()
self.father.defaultAskName = self.askNameCheck.isChecked()
self.father.defaultToolBar = self.toolbarCheck.isChecked()
self.father.defaultTooltips = self.tooltipCheck.isChecked()
self.father.defaultToolbarActionList = self.currentToolbar
self.father.defaultStartupBool = self.startupgroupbox.isChecked()
self.father.defaultStartupDir = self.startupDirEntry.text()
self.father.defaultLinewidth = self.lwSpinBox.value()
self.father.defaultMinXTicks = self.xTicksSpinBox.value()
self.father.defaultMinYTicks = self.yTicksSpinBox.value()
self.father.defaultColor = self.color
self.father.defaultColorRange = self.crEntry.currentText()
self.father.defaultGrids[0] = self.xgridCheck.isChecked()
self.father.defaultGrids[1] = self.ygridCheck.isChecked()
self.father.defaultZeroScroll = self.zeroScrollCheck.isChecked()
self.father.defaultShowTitle = self.showTitleCheck.isChecked()
self.father.defaultZoomStep = self.ZoomStepSpinBox.value()
self.father.defaultColorMap = self.cmEntry.currentText()
self.father.defaultContourConst = self.constColorCheck.isChecked()
self.father.defaultPosColor = self.posColor
self.father.defaultNegColor = self.negColor
self.father.defaultWidthRatio = self.WRSpinBox.value()
self.father.defaultHeightRatio = self.HRSpinBox.value()
self.father.defaultPColorMap = self.cmEntry2D.currentText()
self.father.saveDefaults()
self.closeEvent()
def reset(self, *args):
self.father.resetDefaults()
self.father.saveDefaults()
self.closeEvent()
def closeEvent(self, *args):
self.deleteLater()
##############################################################################
class ToolbarWindow(wc.ToolWindow):
NAME = "Change Toolbar"
RESIZABLE = True
MENUDISABLE = False
def __init__(self, parent):
super(ToolbarWindow, self).__init__(parent)
self.grid.addWidget(wc.QLabel("Actions:"), 0, 0)
self.grid.addWidget(wc.QLabel("Toolbar Actions:"), 0, 1)
self.listB = DestListWidget(self)
for i in self.father.father.defaultToolbarActionList:
QtWidgets.QListWidgetItem(i, self.listB).setToolTip(i)
self.listA = OrigListWidget(self, self.listB)
for i in self.father.father.allActionsList:
QtWidgets.QListWidgetItem(i[0], self.listA).setToolTip(i[0])
self.grid.addWidget(self.listA, 1, 0, 2, 1)
self.grid.addWidget(self.listB, 1, 2, 2, 1)
self.rightPush = QtWidgets.QPushButton(u"\u2192", self)
self.leftPush = QtWidgets.QPushButton(u"\u2190", self)
self.downPush = QtWidgets.QPushButton(u"\u2193", self)
self.upPush = QtWidgets.QPushButton(u"\u2191", self)
self.rightPush.setSizePolicy(
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding)
self.leftPush.setSizePolicy(
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding)
self.downPush.setSizePolicy(
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding)
self.upPush.setSizePolicy(
QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Expanding)
self.grid.addWidget(self.rightPush, 1, 1)
self.grid.addWidget(self.leftPush, 2, 1)
self.grid.addWidget(self.upPush, 1, 3)
self.grid.addWidget(self.downPush, 2, 3)
self.leftPush.clicked.connect(self.right2left)
self.rightPush.clicked.connect(self.left2right)
self.upPush.clicked.connect(self.moveUp)
self.downPush.clicked.connect(self.moveDown)
self.resize(650, 500)
def right2left(self):
self.listB.deleteSelected()
def left2right(self):
for item in self.listA.selectedItems():
self.listB.addItem(item.text())
def moveUp(self):
self.listB.moveSelection('up')
def moveDown(self):
self.listB.moveSelection('down')
def applyAndClose(self, *args):
items = []
for index in range(self.listB.count()):
items.append(self.listB.item(index).text())
self.father.currentToolbar = items
self.closeEvent()
def closeEvent(self, *args):
self.deleteLater()
##############################################################################
class aboutWindow(wc.ToolWindow):
NAME = "About ssNake"
RESIZABLE = True
MENUDISABLE = False
def __init__(self, parent):
super(aboutWindow, self).__init__(parent)
self.cancelButton.hide()
self.logo = QtWidgets.QLabel(self)
self.logo.setPixmap(QtGui.QPixmap(os.path.dirname(os.path.realpath(__file__)) + "/Icons/logo.gif"))
self.tabs = QtWidgets.QTabWidget(self)
self.text = QtWidgets.QTextBrowser(self)
self.text.setOpenExternalLinks(True)
self.license = QtWidgets.QTextBrowser(self)
self.license.setOpenExternalLinks(True)
licenseText = ''
with open(os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'licenseHtml.txt') as f:
licenseText = f.read()
self.license.setHtml(licenseText)
pythonVersion = sys.version
pythonVersion = pythonVersion[:pythonVersion.index(' ')]
from scipy import __version__ as scipyVersion
self.text.setText('<p><b>ssNake ' + VERSION + '</b></p>' +
'<p>Copyright (©) 2016–2020 Bas van Meerten & Wouter Franssen</p>' + '<p>Email: <a href="mailto:[email protected]" >[email protected]</a></p>' +
'<p>Publication: <a href="https://doi.org/10.1016/j.jmr.2019.02.006" >https://doi.org/10.1016/j.jmr.2019.02.006</a></p>' +
'<b>Library versions</b>:<br>Python ' + pythonVersion + '<br>numpy ' + np.__version__ +
'<br>SciPy ' + scipyVersion +
'<br>matplotlib ' + matplotlib.__version__ +
'<br>PyQt ' + QtCore.PYQT_VERSION_STR +
'<br>Qt ' + QtCore.QT_VERSION_STR)
self.thanks = QtWidgets.QTextEdit(self)
self.thanks.setReadOnly(True)
self.thanks.setHtml('<p><b>The ssNake team wishes to thank:</b></p>Prof. Arno Kentgens<br>Koen Tijssen<br>Ole Brauckmann<br>Merijn Blaakmeer<br>Vincent Breukels<br>Ernst van Eck<br>Fleur van Zelst<br>Sander Lambregts<br>Dr. Andreas Brinkmann')
self.tabs.addTab(self.text, 'Version')
self.tabs.addTab(self.thanks, 'Thanks')
self.tabs.addTab(self.license, 'License')
self.grid.addWidget(self.logo, 0, 0, 1, 3, QtCore.Qt.AlignHCenter)
self.grid.addWidget(self.tabs, 1, 0, 1, 3)
self.resize(550, 700)
def closeEvent(self, *args):
self.deleteLater()
##############################################################################
class shiftConversionWindow(wc.ToolWindow):
NAME = "Chemical Shift Conversions"
MENUDISABLE = False
RESIZABLE = True
def __init__(self, parent):
super(shiftConversionWindow, self).__init__(parent)
self.standardGroup = QtWidgets.QGroupBox('Standard Convention:')
self.standardFrame = QtWidgets.QGridLayout()
D11label = wc.QLabel(u'δ' + '<sub>11</sub> [ppm]')
self.standardFrame.addWidget(D11label, 0, 1)
D22label = wc.QLabel(u'δ' + '<sub>22</sub> [ppm]')
self.standardFrame.addWidget(D22label, 0, 2)
D33label = wc.QLabel(u'δ' + '<sub>33</sub> [ppm]')
self.standardFrame.addWidget(D33label, 0, 3)
standardGO = QtWidgets.QPushButton("Go")
standardGO.setMinimumWidth(100)
self.standardFrame.addWidget(standardGO, 1, 0)
standardGO.clicked.connect(lambda: self.shiftCalc(0))
self.D11 = wc.QLineEdit("0")
self.D11.setMinimumWidth(100)
self.standardFrame.addWidget(self.D11, 1, 1)
self.D22 = wc.QLineEdit("0")
self.D22.setMinimumWidth(100)
self.standardFrame.addWidget(self.D22, 1, 2)
self.D33 = wc.QLineEdit("0")
self.D33.setMinimumWidth(100)
self.standardFrame.addWidget(self.D33, 1, 3)
self.standardGroup.setLayout(self.standardFrame)
self.grid.addWidget(self.standardGroup, 0, 0, 1, 3)
# xyz Convention
self.xyzGroup = QtWidgets.QGroupBox('xyz Convention:')
self.xyzFrame = QtWidgets.QGridLayout()
dxxlabel = wc.QLabel(u'δ' + '<sub>xx</sub> [ppm]')
self.xyzFrame.addWidget(dxxlabel, 3, 1)
dyylabel = wc.QLabel(u'δ' + '<sub>yy</sub> [ppm]')
self.xyzFrame.addWidget(dyylabel, 3, 2)
dzzlabel = wc.QLabel(u'δ' + '<sub>zz</sub> [ppm]')
self.xyzFrame.addWidget(dzzlabel, 3, 3)
xyzGO = QtWidgets.QPushButton("Go")
xyzGO.setMinimumWidth(100)
self.xyzFrame.addWidget(xyzGO, 4, 0)
xyzGO.clicked.connect(lambda: self.shiftCalc(1))
self.dxx = wc.QLineEdit("0")
self.dxx.setMinimumWidth(100)
self.xyzFrame.addWidget(self.dxx, 4, 1)
self.dyy = wc.QLineEdit("0")
self.dyy.setMinimumWidth(100)
self.xyzFrame.addWidget(self.dyy, 4, 2)
self.dzz = wc.QLineEdit("0")
self.dzz.setMinimumWidth(100)
self.xyzFrame.addWidget(self.dzz, 4, 3)
self.xyzGroup.setLayout(self.xyzFrame)
self.grid.addWidget(self.xyzGroup, 1, 0, 1, 3)
# Haeberlen Convention
self.haebGroup = QtWidgets.QGroupBox('Haeberlen Convention')
self.haebFrame = QtWidgets.QGridLayout()
disolabel = wc.QLabel(u'δ' + '<sub>iso</sub> [ppm]')
self.haebFrame.addWidget(disolabel, 6, 1)
danisolabel = wc.QLabel(u'δ' + '<sub>aniso</sub> [ppm]')
self.haebFrame.addWidget(danisolabel, 6, 2)
etalabel = wc.QLabel(u'η')
self.haebFrame.addWidget(etalabel, 6, 3)
haeberGO = QtWidgets.QPushButton("Go")
haeberGO.setMinimumWidth(100)
self.haebFrame.addWidget(haeberGO, 7, 0)
haeberGO.clicked.connect(lambda: self.shiftCalc(2))
self.diso = wc.QLineEdit("0")
self.diso.setMinimumWidth(100)
self.haebFrame.addWidget(self.diso, 7, 1)
self.daniso = wc.QLineEdit("0")
self.daniso.setMinimumWidth(100)
self.haebFrame.addWidget(self.daniso, 7, 2)
self.eta = wc.QLineEdit("0")
self.eta.setMinimumWidth(100)
self.haebFrame.addWidget(self.eta, 7, 3)
self.haebGroup.setLayout(self.haebFrame)
self.grid.addWidget(self.haebGroup, 2, 0, 1, 3)
# Herzfeld-Berger convention
self.hbGroup = QtWidgets.QGroupBox('Herzfeld-Berger Convention')
self.hbFrame = QtWidgets.QGridLayout()
hbdisolabel = wc.QLabel(u'δ' + '<sub>iso</sub> [ppm]')
self.hbFrame.addWidget(hbdisolabel, 9, 1)
omegalabel = wc.QLabel(u'Ω [ppm]')
self.hbFrame.addWidget(omegalabel, 9, 2)
skewlabel = wc.QLabel(u'κ')
self.hbFrame.addWidget(skewlabel, 9, 3)
hbGO = QtWidgets.QPushButton("Go")
hbGO.setMinimumWidth(100)
self.hbFrame.addWidget(hbGO, 10, 0)
hbGO.clicked.connect(lambda: self.shiftCalc(3))
self.hbdiso = wc.QLineEdit("0")
self.hbdiso.setMinimumWidth(100)
self.hbFrame.addWidget(self.hbdiso, 10, 1)
self.hbdaniso = wc.QLineEdit("0")
self.hbdaniso.setMinimumWidth(100)
self.hbFrame.addWidget(self.hbdaniso, 10, 2)
self.hbskew = wc.QLineEdit("0")
self.hbskew.setMinimumWidth(100)
self.hbFrame.addWidget(self.hbskew, 10, 3)
self.hbGroup.setLayout(self.hbFrame)
self.grid.addWidget(self.hbGroup, 3, 0, 1, 3)
# Reset
self.cancelButton.setText("Close")
self.cancelButton.clicked.disconnect()
self.cancelButton.clicked.connect(self.closeEvent)
self.okButton.setText("Reset")
self.okButton.clicked.disconnect()
self.okButton.clicked.connect(self.valueReset)
def shiftCalc(self, Type):
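# Type selects the input convention: 0 = standard (delta11, delta22, delta33),
# 1 = xyz (dxx, dyy, dzz), 2 = Haeberlen (iso, aniso, eta),
# 3 = Herzfeld-Berger (iso, span, skew). The result is written back to all four groups.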
if Type == 0: # If from standard
try:
delta11 = float(safeEval(self.D11.text(), Type='FI'))
delta22 = float(safeEval(self.D22.text(), Type='FI'))
delta33 = float(safeEval(self.D33.text(), Type='FI'))
Values = [delta11, delta22, delta33]
except Exception:
raise SsnakeException("Shift Conversion: Invalid input in Standard Convention")
if Type == 1: # If from xyz
try:
delta11 = float(safeEval(self.dxx.text(), Type='FI')) # Treat xyz as 123, as it reorders them anyway
delta22 = float(safeEval(self.dyy.text(), Type='FI'))
delta33 = float(safeEval(self.dzz.text(), Type='FI'))
Values = [delta11, delta22, delta33]
except Exception:
raise SsnakeException("Shift Conversion: Invalid input in xyz Convention")
if Type == 2: # From haeberlen
try:
eta = float(safeEval(self.eta.text(), Type='FI'))
delta = float(safeEval(self.daniso.text(), Type='FI'))
iso = float(safeEval(self.diso.text(), Type='FI'))
Values = [iso, delta, eta]
except Exception:
raise SsnakeException("Shift Conversion: Invalid input in Haeberlen Convention")
if Type == 3: # From Herzfeld-Berger
try:
iso = float(safeEval(self.hbdiso.text(), Type='FI'))
span = float(safeEval(self.hbdaniso.text(), Type='FI'))
skew = float(safeEval(self.hbskew.text(), Type='FI'))
Values = [iso, span, skew]
except Exception:
raise SsnakeException("Shift Conversion: Invalid input in Hertzfeld-Berger Convention")
Results = func.shiftConversion(Values, Type) # Do the actual conversion
# Standard convention
self.D11.setText('%#.4g' % Results[0][0])
self.D22.setText('%#.4g' % Results[0][1])
self.D33.setText('%#.4g' % Results[0][2])
# Convert to haeberlen convention and xxyyzz
self.dxx.setText('%#.4g' % Results[1][0])
self.dyy.setText('%#.4g' % Results[1][1])
self.dzz.setText('%#.4g' % Results[1][2])
# Haeberlen def
self.diso.setText('%#.4g' % Results[2][0])
self.daniso.setText('%#.4g' % Results[2][1])
try: # If a number
self.eta.setText('%#.4g' % Results[2][2])
except Exception:
self.eta.setText('ND')
# Convert to Herzfeld-Berger Convention
self.hbdiso.setText('%#.4g' % Results[3][0])
self.hbdaniso.setText('%#.4g' % Results[3][1])
try:
self.hbskew.setText('%#.4g' % Results[3][2])
except Exception:
self.hbskew.setText('ND')
def valueReset(self): # Resets all the boxes to 0
self.D11.setText('0')
self.D22.setText('0')
self.D33.setText('0')
self.dxx.setText('0')
self.dyy.setText('0')
self.dzz.setText('0')
self.eta.setText('0')
self.diso.setText('0')
self.daniso.setText('0')
self.hbskew.setText('0')
self.hbdiso.setText('0')
self.hbdaniso.setText('0')
def closeEvent(self, *args):
self.deleteLater()
##############################################################################
class quadConversionWindow(wc.ToolWindow):
Ioptions = ['1', '3/2', '2', '5/2', '3', '7/2', '4', '9/2', '5', '6', '7']
Ivalues = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 6.0, 7.0]
NAME = "Quadrupolar Coupling Conversions"
RESIZABLE = True
MENUDISABLE = False
def __init__(self, parent):
super(quadConversionWindow, self).__init__(parent)
self.comGroup = QtWidgets.QGroupBox("Common Parameters:")
self.comFrame = QtWidgets.QGridLayout()
nucLabel = wc.QLabel("Nucleus:")
self.comFrame.addWidget(nucLabel, 0, 0)
qindex = [x for (x, val) in enumerate(ISOTOPES['q']) if val is not None and ISOTOPES['spin'][x] != 0.5]
self.names = ['User'] + [val for (x, val) in enumerate(ISOTOPES['formatName']) if x in qindex]
self.I = [0.0] + [val for (x, val) in enumerate(ISOTOPES['spin']) if x in qindex]
self.Qvalues = [0.0] + [val for (x, val) in enumerate(ISOTOPES['q']) if x in qindex]
self.nucDrop = QtWidgets.QComboBox()
self.nucDrop.addItems(self.names)
self.nucDrop.currentIndexChanged.connect(self.setNuc)
self.comFrame.addWidget(self.nucDrop, 1, 0)
Itext = wc.QLabel("I:")
self.comFrame.addWidget(Itext, 0, 1)
self.IEntry = QtWidgets.QComboBox()
self.IEntry.addItems(self.Ioptions)
self.IEntry.setCurrentIndex(0)
self.IEntry.activated.connect(self.userChange)
self.comFrame.addWidget(self.IEntry, 1, 1)
etalabel = wc.QLabel(u'η:')
self.comFrame.addWidget(etalabel, 0, 3)
self.Eta = wc.QLineEdit("0")
self.Eta.setMinimumWidth(100)
self.comFrame.addWidget(self.Eta, 1, 3)
momentlabel = wc.QLabel('Q [fm<sup>2</sup>]:')
self.comFrame.addWidget(momentlabel, 0, 2)
self.Moment = wc.QLineEdit("ND")
self.Moment.setMinimumWidth(100)
self.Moment.textEdited.connect(self.userChange)
self.comFrame.addWidget(self.Moment, 1, 2)
self.comGroup.setLayout(self.comFrame)
self.grid.addWidget(self.comGroup, 1, 0, 1, 3)
self.CqGroup = QtWidgets.QGroupBox("C_Q Convention:")
self.CqFrame = QtWidgets.QGridLayout()
Cqlabel = wc.QLabel(u'C' + u'<sub>Q</sub>/2π [MHz]:')
self.CqFrame.addWidget(Cqlabel, 3, 1)
CqGO = QtWidgets.QPushButton("Go")
self.CqFrame.addWidget(CqGO, 4, 0)
CqGO.clicked.connect(lambda: self.quadCalc(0))
self.Cq = wc.QLineEdit("0")
self.Cq.setMinimumWidth(100)
self.CqFrame.addWidget(self.Cq, 4, 1)
self.CqGroup.setLayout(self.CqFrame)
self.grid.addWidget(self.CqGroup, 3, 0, 1, 2)
self.WqGroup = QtWidgets.QGroupBox(u"ω_Q Convention:")
self.WqFrame = QtWidgets.QGridLayout()
Wqlabel = wc.QLabel(u'ω' + u'<sub>Q</sub>/2π [MHz]:')
self.WqFrame.addWidget(Wqlabel, 6, 1)
WqGO = QtWidgets.QPushButton("Go")
self.WqFrame.addWidget(WqGO, 7, 0)
WqGO.clicked.connect(lambda: self.quadCalc(1))
self.Wq = wc.QLineEdit("0")
self.Wq.setMinimumWidth(100)
self.WqFrame.addWidget(self.Wq, 7, 1)
self.WqGroup.setLayout(self.WqFrame)
self.grid.addWidget(self.WqGroup, 7, 0, 1, 2)
self.fieldGroup = QtWidgets.QGroupBox('Field Gradients:')
self.fieldFrame = QtWidgets.QGridLayout()
Vxxlabel = wc.QLabel('V<sub>xx</sub> [V/m<sup>2</sup>]:')
self.fieldFrame.addWidget(Vxxlabel, 9, 1)
VGO = QtWidgets.QPushButton("Go")
self.fieldFrame.addWidget(VGO, 10, 0)
VGO.clicked.connect(lambda: self.quadCalc(2))
self.Vxx = wc.QLineEdit("ND")
self.Vxx.setMinimumWidth(100)
self.fieldFrame.addWidget(self.Vxx, 10, 1)
Vyylabel = wc.QLabel('V<sub>yy</sub> [V/m<sup>2</sup>]:')
self.fieldFrame.addWidget(Vyylabel, 9, 2)
self.Vyy = wc.QLineEdit("ND")
self.Vyy.setMinimumWidth(100)
self.fieldFrame.addWidget(self.Vyy, 10, 2)
Vzzlabel = wc.QLabel('V<sub>zz</sub> [V/m<sup>2</sup>]:')
self.fieldFrame.addWidget(Vzzlabel, 9, 3)
self.Vzz = wc.QLineEdit("ND")
self.Vzz.setMinimumWidth(100)
self.fieldFrame.addWidget(self.Vzz, 10, 3)
self.fieldGroup.setLayout(self.fieldFrame)
self.grid.addWidget(self.fieldGroup, 8, 0, 1, 4)
# Reset
self.cancelButton.setText("Close")
self.cancelButton.clicked.disconnect()
self.cancelButton.clicked.connect(self.closeEvent)
self.okButton.setText("Reset")
self.okButton.clicked.disconnect()
self.okButton.clicked.connect(self.valueReset)
def setNuc(self, index):
I = self.I[index]
if int(I * 2) > 0:
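# Ioptions starts at I = 1 and increases in steps of 1/2, so the combobox index is 2*I - 2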
self.IEntry.setCurrentIndex(int(I * 2 - 2))
self.Moment.setText(str(self.Qvalues[index]))
def userChange(self, index=None):
self.nucDrop.setCurrentIndex(0)
def quadCalc(self, Type):
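# Type selects the input: 0 = Cq/eta, 1 = omega_Q/eta, 2 = field gradient
# principal values (Vxx, Vyy, Vzz). The quadrupole moment Q is required for
# Type 2 and optional otherwise; without it the field gradients show 'ND'.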
I = self.Ivalues[self.IEntry.currentIndex()]
if Type == 0: # Cq as input
# Czz is equal to Cq; via the same definition (scale), Cxx and Cyy can be found
try:
Cq = float(safeEval(self.Cq.text(), Type='FI'))
Eta = float(safeEval(self.Eta.text(), Type='FI'))
Values = [Cq, Eta]
except Exception:
raise SsnakeException("Quad Conversion: Invalid input in Cq definition")
if Type == 1:
try:
Wq = float(safeEval(self.Wq.text(), Type='FI'))
Eta = float(safeEval(self.Eta.text(), Type='FI'))
Values = [Wq, Eta]
except Exception:
raise SsnakeException("Quad Conversion: Invalid input in Wq definition")
if Type == 2:
try:
Vxx = float(safeEval(self.Vxx.text(), Type='FI'))
Vyy = float(safeEval(self.Vyy.text(), Type='FI'))
Vzz = float(safeEval(self.Vzz.text(), Type='FI'))
Values = [Vxx, Vyy, Vzz]
except Exception:
raise SsnakeException("Quad Conversion: Invalid input in field gradients")
try:
Q = float(safeEval(self.Moment.text(), Type='FI')) * 1e-30 # get moment and convert from fm^2
except Exception:
if Type in (0, 1):
Q = None
else:
raise SsnakeException("Quad Conversion: Invalid input in quadrupole moment Q")
#Do conversion
Result = func.quadConversion(Values, I, Type, Q)
if Result[0][1] is None:
self.Eta.setText('ND')
else:
self.Eta.setText('%#.4g' % Result[0][1])
self.Cq.setText('%#.4g' % Result[0][0])
self.Wq.setText('%#.4g' % Result[1][0])
if Result[2][0] is None:
self.Moment.setText('ND')
self.Vxx.setText('ND')
self.Vyy.setText('ND')
self.Vzz.setText('ND')
else:
self.Vxx.setText('%#.4g' % Result[2][0])
self.Vyy.setText('%#.4g' % Result[2][1])
self.Vzz.setText('%#.4g' % Result[2][2])
def valueReset(self): # Resets all the boxes to 0
self.Cq.setText('0')
self.Eta.setText('0')
self.Wq.setText('0')
self.Moment.setText('ND')
self.Vxx.setText('ND')
self.Vyy.setText('ND')
self.Vzz.setText('ND')
def closeEvent(self, *args):
self.deleteLater()
##############################################################################
class dipolarDistanceWindow(wc.ToolWindow):
NAME = "Dipolar Distance Calculation"
RESIZABLE = True
MENUDISABLE = False
def __init__(self, parent):
super(dipolarDistanceWindow, self).__init__(parent)
self.comGroup = QtWidgets.QGroupBox("Gyromagnetic Ratios:")
self.comFrame = QtWidgets.QGridLayout()
gamma1label = wc.QLabel(u'γ<sub>1</sub> [10<sup>7</sup> rad/s/T]:')
self.comFrame.addWidget(gamma1label, 0, 0)
gammaindex = [x for (x, val) in enumerate(ISOTOPES['gamma']) if val is not None]
self.gammaValues = [0.0] + [val for (x, val) in enumerate(ISOTOPES['gamma']) if x in gammaindex]
self.names = ['User'] + [val for (x, val) in enumerate(ISOTOPES['formatName']) if x in gammaindex]
self.gamma1Drop = QtWidgets.QComboBox()
self.gamma1Drop.addItems(self.names)
self.gamma1Drop.currentIndexChanged.connect(self.setGamma1)
self.comFrame.addWidget(self.gamma1Drop, 1, 0)
self.gamma2Drop = QtWidgets.QComboBox()
self.gamma2Drop.addItems(self.names)
self.gamma2Drop.currentIndexChanged.connect(self.setGamma2)
self.comFrame.addWidget(self.gamma2Drop, 1, 1)
self.gamma1 = wc.QLineEdit("0.0")
self.gamma1.setMinimumWidth(100)
self.gamma1.textEdited.connect(self.gamma1Changed)
self.comFrame.addWidget(self.gamma1, 2, 0)
gamma2label = wc.QLabel(u'γ<sub>2</sub> [10<sup>7</sup> rad/s/T]:')
self.comFrame.addWidget(gamma2label, 0, 1)
self.gamma2 = wc.QLineEdit("0.0")
self.gamma2.setMinimumWidth(100)
self.gamma2.textEdited.connect(self.gamma2Changed)
self.comFrame.addWidget(self.gamma2, 2, 1)
self.comGroup.setLayout(self.comFrame)
self.grid.addWidget(self.comGroup, 1, 0, 1, 3)
self.distanceGroup = QtWidgets.QGroupBox("Distance:")
self.distanceFrame = QtWidgets.QGridLayout()
distancelabel = wc.QLabel(u'r [Å]')
self.distanceFrame.addWidget(distancelabel, 3, 1)
distanceGO = QtWidgets.QPushButton("Go")
self.distanceFrame.addWidget(distanceGO, 4, 0)
distanceGO.clicked.connect(lambda: self.Calc(0))
self.distance = wc.QLineEdit("0")
self.distance.setMinimumWidth(100)
self.distanceFrame.addWidget(self.distance, 4, 1)
self.distanceGroup.setLayout(self.distanceFrame)
self.grid.addWidget(self.distanceGroup, 3, 0, 1, 2)
self.dipolarGroup = QtWidgets.QGroupBox("Dipolar Coupling:")
self.dipolarFrame = QtWidgets.QGridLayout()
dipolarlabel = wc.QLabel(u'D [kHz]')
self.dipolarFrame.addWidget(dipolarlabel, 3, 1)
dipolarGO = QtWidgets.QPushButton("Go")
self.dipolarFrame.addWidget(dipolarGO, 4, 0)
dipolarGO.clicked.connect(lambda: self.Calc(1))
self.dipolar = wc.QLineEdit("0")
self.dipolar.setMinimumWidth(100)
self.dipolarFrame.addWidget(self.dipolar, 4, 1)
self.dipolarGroup.setLayout(self.dipolarFrame)
self.grid.addWidget(self.dipolarGroup, 4, 0, 1, 2)
# Reset
self.cancelButton.setText("Close")
self.cancelButton.clicked.disconnect()
self.cancelButton.clicked.connect(self.closeEvent)
self.okButton.setText("Reset")
self.okButton.clicked.disconnect()
self.okButton.clicked.connect(self.valueReset)
def gamma1Changed(self):
self.gamma1Drop.setCurrentIndex(0)
def gamma2Changed(self):
self.gamma2Drop.setCurrentIndex(0)
def setGamma1(self, index):
if index != 0:
self.gamma1.setText(str(self.gammaValues[index]))
def setGamma2(self, index):
if index != 0:
self.gamma2.setText(str(self.gammaValues[index]))
def Calc(self, Type):
try:
gamma1 = float(safeEval(self.gamma1.text(), Type='FI')) * 1e7
gamma2 = float(safeEval(self.gamma2.text(), Type='FI')) * 1e7
except Exception:
raise SsnakeException("Dipolar Distance: Invalid input in gamma values")
if Type == 0: # Distance as input
try:
r = abs(float(safeEval(self.distance.text(), Type='FI')))
except Exception:
raise SsnakeException("Dipolar Distance: Invalid input in r")
if Type == 1:
try:
D = abs(float(safeEval(self.dipolar.text(), Type='FI')))
except Exception:
raise SsnakeException("Dipolar Distance: Invalid input in D")
hbar = 1.054573e-34
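# Dipolar coupling D = (mu0/4pi) * gamma1 * gamma2 * hbar / (2*pi*r^3);
# the 1e-7 factor below is mu0/4pi in SI units, r is converted from
# angstrom to metres and D from Hz to kHz.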
if Type == 0:
if r == 0.0:
D = np.inf
else:
D = abs(- 1e-7 * gamma1 * gamma2 * hbar / (r * 10**-10) **3 / (2 * np.pi))
D /= 1000
if Type == 1:
if D == 0.0:
r = np.inf
else:
r = 1 / abs(D * 1000 /gamma1 / gamma2 / hbar / 1e-7 * (2 * np.pi))**(1.0/3)
r *= 1e10
self.dipolar.setText('%#.5g' % D)
self.distance.setText('%#.5g' % r)
def valueReset(self): # Resets all the boxes to 0
self.dipolar.setText('0.0')
self.distance.setText('0.0')
self.gamma1.setText('0.0')
self.gamma2.setText('0.0')
self.gamma1Drop.setCurrentIndex(0)
self.gamma2Drop.setCurrentIndex(0)
def closeEvent(self, *args):
self.deleteLater()
##############################################################################
class tempCalWindow(QtWidgets.QWidget):
#[minTemp, maxTemp, shiftToTemp, tempToShift, shiftType, Delta0, T0, RefName]
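# 'absShift' methods convert the absolute peak separation directly;
# 'relShift' methods reference the shift to Delta0 measured at temperature T0.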
METHANOL = [178,330,
lambda Delta: 409.0 - 36.54 * Delta - 21.85 * Delta**2,
lambda Temp: (36.54 - np.sqrt(36.54**2 - 4 * -21.85 * (409.0 - Temp))) / (2 * -21.85),
'absShift',None,None,'Ammann et al., JMR, 46, 319 (1982)']
ETH_GLYCOL = [273,416,
lambda Delta: 466.5 - 102.00 * Delta,
lambda Temp: (Temp - 466.5) / -102.0,
'absShift',None,None,'Ammann et al., JMR, 46, 319 (1982)']
PBNO3 = [143, 423,
lambda Delta, Delta0, T0: (Delta0 - Delta) / 0.753 + T0 ,
lambda T, Delta0, T0: (T0 - T) * 0.753 + Delta0 ,
'relShift','-3473','293','Bielecki et al., JMR, 116, 215 (1995)']
KBR = [170, 320,
lambda Delta, Delta0, T0: (Delta0 - Delta) / 0.0250 + T0 ,
lambda T, Delta0, T0: (T0 - T) * 0.0250 + Delta0 ,
'relShift','0','293','Thurber et al., JMR, 196, 84 (2009)']
DEFINITIONS = [METHANOL, ETH_GLYCOL, PBNO3 ,KBR]
TEXTLIST = ['1H: Methanol (178 K < T < 330 K)', '1H: Ethylene Glycol (273 K < T < 416 K)',
'207Pb: Lead Nitrate (143 K < T < 423 K)','79Br: KBr (170 K < T < 320 K)']
T1_KBr = [20,296,
lambda Relax: optimize.brentq(lambda T,T1: 0.0145 + 5330/T**2 + 1.42e7/T**4 + 2.48e9/T**6 - T1, 20, 296 ,args=(Relax,)),
lambda T: 0.0145 + 5330/T**2 + 1.42e7/T**4 + 2.48e9/T**6,
'Thurber et al., JMR, 196, 84 (2009)']
T1_CsI = [8,104,
lambda Relax: optimize.brentq(lambda T,T1: -1.6e-3 + 1.52e3/T**2 + 0.387e6/T**4 + 0.121e9/T**6 - T1, 8, 104 ,args=(Relax,)),
lambda T: -1.6e-3 + 1.52e3/T**2 + 0.387e6/T**4 + 0.121e9/T**6,
'Sarkar et al., JMR, 212, 460 (2011)']
T1_DEFINITIONS = [T1_KBr, T1_CsI]
T1_TEXTLIST = ['79Br: KBr (20 K < T < 296 K, 9.4 T)', '127I: CsI (8 K < T < 104 K, 9.4 T)']
def __init__(self, parent):
super(tempCalWindow, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.Tool)
self.father = parent
self.setWindowTitle("Temperature Calibration")
tabWidget = QtWidgets.QTabWidget()
tab1 = QtWidgets.QWidget()
tabWidget.addTab(tab1, "Chemical Shift Based")
grid1 = QtWidgets.QGridLayout()
tab1.setLayout(grid1)
# Shift based
self.typeDrop = QtWidgets.QComboBox()
self.typeDrop.addItems(self.TEXTLIST)
grid1.addWidget(self.typeDrop, 0, 0)
self.typeDrop.currentIndexChanged.connect(self.changeType)
self.RefGroup = QtWidgets.QGroupBox("Relative to:")
self.RefFrame = QtWidgets.QGridLayout()
self.Delta0Label = wc.QLabel(u'δ [ppm]')
self.RefFrame.addWidget(self.Delta0Label, 0, 0)
self.Delta0 = wc.QLineEdit("")
self.Delta0.setMinimumWidth(100)
self.RefFrame.addWidget(self.Delta0, 1, 0)
self.T0Label = wc.QLabel(u'T [K]')
self.RefFrame.addWidget(self.T0Label, 0, 1)
self.T0 = wc.QLineEdit("")
self.T0.setMinimumWidth(100)
self.RefFrame.addWidget(self.T0, 1, 1)
self.RefGroup.setLayout(self.RefFrame)
grid1.addWidget(self.RefGroup, 1, 0)
self.RefGroup.hide()
self.DeltaGroup = QtWidgets.QGroupBox("Shift to Temperature:")
self.DeltaFrame = QtWidgets.QGridLayout()
self.DeltaLabel = wc.QLabel(u'Δδ [ppm]')
self.DeltaFrame.addWidget(self.DeltaLabel, 0, 1)
DeltaGO = QtWidgets.QPushButton("Go")
self.DeltaFrame.addWidget(DeltaGO, 1, 0)
DeltaGO.clicked.connect(self.shiftToTemp)
self.Delta = wc.QLineEdit("")
self.Delta.setMinimumWidth(100)
self.DeltaFrame.addWidget(self.Delta, 1, 1)
self.DeltaGroup.setLayout(self.DeltaFrame)
grid1.addWidget(self.DeltaGroup, 2, 0)
self.TempGroup = QtWidgets.QGroupBox("Temperature to Shift:")
self.TempFrame = QtWidgets.QGridLayout()
TempLabel = wc.QLabel(u'Temperature [K]')
self.TempFrame.addWidget(TempLabel, 0, 1)
TempGO = QtWidgets.QPushButton("Go")
TempGO.clicked.connect(self.tempToShift)
self.TempFrame.addWidget(TempGO, 1, 0)
self.Temp = wc.QLineEdit("")
self.Temp.setMinimumWidth(100)
self.TempFrame.addWidget(self.Temp, 1, 1)
self.TempGroup.setLayout(self.TempFrame)
grid1.addWidget(self.TempGroup, 3, 0)
self.refname = wc.QLabel(self.DEFINITIONS[0][7])
grid1.addWidget(self.refname, 4, 0)
# T1 based
tab2 = QtWidgets.QWidget()
tabWidget.addTab(tab2, "T1 Based")
grid2 = QtWidgets.QGridLayout()
tab2.setLayout(grid2)
self.T1typeDrop = QtWidgets.QComboBox()
self.T1typeDrop.addItems(self.T1_TEXTLIST)
grid2.addWidget(self.T1typeDrop, 0, 0)
self.T1typeDrop.currentIndexChanged.connect(self.changeTypeT1)
self.T1Group = QtWidgets.QGroupBox("T1 to Temperature:")
self.T1Frame = QtWidgets.QGridLayout()
self.T1Label = wc.QLabel(u'T1 [s]')
self.T1Frame.addWidget(self.T1Label, 0, 1)
T1GO = QtWidgets.QPushButton("Go")
self.T1Frame.addWidget(T1GO, 1, 0)
T1GO.clicked.connect(self.t1ToTemp)
self.T1 = wc.QLineEdit("")
self.T1.setMinimumWidth(100)
self.T1Frame.addWidget(self.T1, 1, 1)
self.T1Group.setLayout(self.T1Frame)
grid2.addWidget(self.T1Group, 2, 0)
self.TempT1Group = QtWidgets.QGroupBox("Temperature to T1:")
self.TempT1Frame = QtWidgets.QGridLayout()
TempT1Label = wc.QLabel(u'Temperature [K]')
self.TempT1Frame.addWidget(TempT1Label, 0, 1)
TempT1GO = QtWidgets.QPushButton("Go")
TempT1GO.clicked.connect(self.tempToT1)
self.TempT1Frame.addWidget(TempT1GO, 1, 0)
self.TempT1 = wc.QLineEdit("")
self.TempT1.setMinimumWidth(100)
self.TempT1Frame.addWidget(self.TempT1, 1, 1)
self.TempT1Group.setLayout(self.TempT1Frame)
grid2.addWidget(self.TempT1Group, 3, 0)
self.refnameT1 = wc.QLabel(self.T1_DEFINITIONS[0][4])
grid2.addWidget(self.refnameT1, 4, 0)
layout = QtWidgets.QGridLayout(self)
layout.addWidget(tabWidget, 0, 0, 1, 4)
cancelButton = QtWidgets.QPushButton("&Close")
cancelButton.clicked.connect(self.closeEvent)
box = QtWidgets.QDialogButtonBox()
box.addButton(cancelButton, QtWidgets.QDialogButtonBox.RejectRole)
layout.addWidget(box, 1, 0, 1, 4)
layout.setColumnStretch(3, 1)
# layout.setRowStretch(3, 1)
self.show()
self.setFixedSize(self.size())
def changeType(self, index):
self.Temp.setText('')
self.Delta.setText('')
self.refname.setText(self.DEFINITIONS[index][7])
if self.DEFINITIONS[self.typeDrop.currentIndex()][4] == 'absShift':
self.DeltaLabel.setText(u'Δδ [ppm]')
self.RefGroup.hide()
self.Delta0.setText('')
self.T0.setText('')
elif self.DEFINITIONS[self.typeDrop.currentIndex()][4] == 'relShift':
self.DeltaLabel.setText(u'δ [ppm]')
self.RefGroup.show()
self.Delta0.setText(self.DEFINITIONS[self.typeDrop.currentIndex()][5])
self.T0.setText(self.DEFINITIONS[self.typeDrop.currentIndex()][6])
self.father.root.processEvents()
self.setFixedSize(self.sizeHint())
def changeTypeT1(self, index):
self.refnameT1.setText(self.T1_DEFINITIONS[index][4])
self.father.root.processEvents()
self.setFixedSize(self.sizeHint())
def tempToT1(self):
Data = self.T1_DEFINITIONS[self.T1typeDrop.currentIndex()]
try:
Temp = float(safeEval(self.TempT1.text(), Type='FI'))
except Exception:
self.T1.setText('?')
raise SsnakeException("Temperature Calibration: Invalid input in Temp value")
T1 = Data[3](Temp)
if Temp < Data[0] or Temp > Data[1]:
self.T1.setText('?')
raise SsnakeException("Temperature Calibration: Temperature outside calibration range")
self.T1.setText('%#.6g' % T1)
def t1ToTemp(self):
Data = self.T1_DEFINITIONS[self.T1typeDrop.currentIndex()]
try:
T1 = float(safeEval(self.T1.text(), Type='FI'))
except Exception:
self.TempT1.setText('?')
raise SsnakeException("Temperature Calibration: Invalid input in Temp value")
try:
Temp = Data[2](T1)
except Exception:
self.TempT1.setText('?')
raise SsnakeException("Temperature Calibration: Temperature outside calibration range")
self.TempT1.setText('%#.6g' % Temp)
def shiftToTemp(self):
Data = self.DEFINITIONS[self.typeDrop.currentIndex()]
try:
Delta = float(safeEval(self.Delta.text(), Type='FI'))
except Exception:
self.Temp.setText('?')
raise SsnakeException("Temperature Calibration: Invalid input in Delta value")
if Data[4] == 'relShift':
try:
Delta0 = float(safeEval(self.Delta0.text(), Type='FI'))
T0 = float(safeEval(self.T0.text(), Type='FI'))
except Exception:
self.Temp.setText('?')
raise SsnakeException("Temperature Calibration: Invalid input in References values")
Temp = Data[2](Delta, Delta0, T0)
else:
Temp = Data[2](Delta)
if Temp < Data[0] or Temp > Data[1]:
self.Temp.setText('?')
raise SsnakeException("Temperature Calibration: Temperature outside calibration range")
self.Temp.setText('%#.6g' % Temp)
def tempToShift(self):
Data = self.DEFINITIONS[self.typeDrop.currentIndex()]
try:
Temp = float(safeEval(self.Temp.text(), Type='FI'))
except Exception:
self.Delta.setText('?')
raise SsnakeException("Temperature Calibration: Invalid input in Temp value")
if Data[4] == 'relShift':
try:
Delta0 = float(safeEval(self.Delta0.text(), Type='FI'))
T0 = float(safeEval(self.T0.text(), Type='FI'))
except Exception:
self.Delta.setText('?')
raise SsnakeException("Temperature Calibration: Invalid input in References values")
Delta = Data[3](Temp, Delta0, T0)
else:
Delta = Data[3](Temp)
if Temp < Data[0] or Temp > Data[1]:
self.Delta.setText('?')
raise SsnakeException("Temperature Calibration: Temperature outside calibration range")
self.Delta.setText('%#.6g' % Delta)
def closeEvent(self, *args):
self.deleteLater()
##############################################################################
class mqmasExtractWindow(wc.ToolWindow):
Ioptions = ['3/2','5/2', '7/2', '9/2']
Ivalues = [1.5, 2.5, 3.5, 4.5]
z = [680.0/27.0, 8500.0/81.0, 6664.0/27.0, 1360.0/3.0]
BdevA = [1.0/68.0, 3.0/850.0, 5.0/3332.0, 1.0/1224.0]
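# z and BdevA are spin-dependent scaling constants (one entry per I in Ioptions)
# used below to interconvert (delta1, delta2) and (delta_iso, P_Q).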
NAME = "MQMAS"
RESIZABLE = True
MENUDISABLE = False
def __init__(self, parent):
super(mqmasExtractWindow, self).__init__(parent)
self.comGroup = QtWidgets.QGroupBox()
self.comFrame = QtWidgets.QGridLayout()
self.comFrame.addWidget(wc.QLabel("I:"), 0, 0)
self.IEntry = QtWidgets.QComboBox()
self.IEntry.addItems(self.Ioptions)
self.IEntry.setCurrentIndex(0)
self.comFrame.addWidget(self.IEntry, 0, 1)
self.comFrame.addWidget(wc.QLabel(u'ν' + '<sub>0</sub> [MHz]'), 1, 0)
self.nu0 = wc.QLineEdit("0.0")
self.comFrame.addWidget(self.nu0, 1, 1)
self.comGroup.setLayout(self.comFrame)
self.grid.addWidget(self.comGroup, 0, 0, 2, 2)
self.onetwoGroup = QtWidgets.QGroupBox("δ1/δ2:")
self.onetwoFrame = QtWidgets.QGridLayout()
self.onetwoFrame.addWidget(wc.QLabel(u'δ' + '<sub>1</sub> [ppm]'), 2, 0)
self.onetwoFrame.addWidget(wc.QLabel(u'δ' + '<sub>2</sub> [ppm]'), 3, 0)
self.delta1 = wc.QLineEdit("0.0")
self.onetwoFrame.addWidget(self.delta1, 2, 1)
self.delta2 = wc.QLineEdit("0.0")
self.onetwoFrame.addWidget(self.delta2, 3, 1)
self.delta1.setMinimumWidth(200)
self.calcIsoPqButton = QtWidgets.QPushButton("Calc δiso/PQ", self)
self.calcIsoPqButton.clicked.connect(self.calcIsoPq)
self.onetwoFrame.addWidget(self.calcIsoPqButton, 4, 0, 1, 2)
self.onetwoGroup.setLayout(self.onetwoFrame)
self.grid.addWidget(self.onetwoGroup, 2, 0, 4, 2)
self.isopqGroup = QtWidgets.QGroupBox("δiso/PQ:")
self.isopqFrame = QtWidgets.QGridLayout()
self.isopqFrame.addWidget(wc.QLabel(u'δ' + '<sub>iso</sub> [ppm]'), 6, 0)
self.deltaIso = wc.QLineEdit("0.0")
self.isopqFrame.addWidget(self.deltaIso, 6, 1)
self.isopqFrame.addWidget(wc.QLabel('P<sub>Q</sub> [MHz]'), 7, 0)
self.pq = wc.QLineEdit("0.0")
self.isopqFrame.addWidget(self.pq, 7, 1)
self.calc12Button = QtWidgets.QPushButton("Calc δ1/δ2", self)
self.calc12Button.clicked.connect(self.calc12)
self.isopqFrame.addWidget(self.calc12Button, 8, 0, 1, 2)
self.isopqGroup.setLayout(self.isopqFrame)
self.grid.addWidget(self.isopqGroup, 6, 0, 4, 2)
self.cancelButton.setText("Close")
self.cancelButton.clicked.disconnect()
self.cancelButton.clicked.connect(self.closeEvent)
self.okButton.setText("Reset")
self.okButton.clicked.disconnect()
self.okButton.clicked.connect(self.valueReset)
def calcIsoPq(self):
nu0 = safeEval(self.nu0.text(), Type='FI')
wrong = False
if nu0 is None:
self.father.dispMsg("MQMAS Extract: Invalid input in V0")
wrong = True
delta1 = safeEval(self.delta1.text(), Type='FI')
if delta1 is None and wrong is False:
self.father.dispMsg("MQMAS Extract: Invalid input in Delta1")
wrong = True
delta2 = safeEval(self.delta2.text(), Type='FI')
if delta2 is None and wrong is False:
self.father.dispMsg("MQMAS Extract: Invalid input in Delta2")
wrong = True
zval = self.z[self.IEntry.currentIndex()]
if wrong is False and delta1 is not None and delta2 is not None:
if delta1 - delta2 < 0.0:
self.father.dispMsg("MQMAS Extract: Delta1 should be larger than Delta2!")
wrong = True
if wrong:
self.deltaIso.setText('-')
self.pq.setText('-')
return
iso = (17.0 * delta1 + 10.0 * delta2) / 27
self.deltaIso.setText(str(iso))
pq = np.sqrt(zval * 1e-6 * (nu0 * 1e6)**2 * (delta1 - delta2)) / 1e6
self.pq.setText(str(pq))
def calc12(self):
nu0 = safeEval(self.nu0.text(), Type='FI')
wrong = False
if nu0 is None or nu0 == 0.0:
self.father.dispMsg("MQMAS Extract: Invalid input in V0")
wrong = True
iso = safeEval(self.deltaIso.text(), Type='FI')
if iso is None and wrong is False:
self.father.dispMsg("MQMAS Extract: Invalid input in DeltaIso")
wrong = True
pq = safeEval(self.pq.text(), Type='FI')
if pq is None and wrong is False:
self.father.dispMsg("MQMAS Extract: Invalid input in PQ")
wrong = True
if wrong:
self.delta1.setText('-')
self.delta2.setText('-')
return
BdevA = self.BdevA[self.IEntry.currentIndex()]
delta1 = iso + BdevA * pq**2/nu0**2 * 1e6
self.delta1.setText(str(delta1))
delta2 = (27 * iso - 17 * delta1) / 10
self.delta2.setText(str(delta2))
def valueReset(self): # Resets all the boxes to 0
self.nu0.setText('0')
self.delta1.setText('0')
self.delta2.setText('0')
self.deltaIso.setText('0')
self.pq.setText('0')
self.IEntry.setCurrentIndex(0)
def closeEvent(self, *args):
self.deleteLater()
def libVersionChecker(version,needed):
"""Compares a two library version strings ('1.2.3' format)
First compares major, then minor, etc.
If version is lower than needed, False is returned
"""
current = [int(x) for x in version.split('.')]
required = [int(x) for x in needed.split('.')]
check = True
if current[0] < required[0]:
check = False
elif current[0] == required[0]:
if current[1] < required[1]:
check = False
elif current[1] == required[1]:
if len(current) > 2 and len(required) > 2:
if current[2] < required[2]:
check = False
return check
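# Example: libVersionChecker('1.11.0', '1.8.2') -> True; libVersionChecker('0.9', '1.0.0') -> False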
def checkVersions():
"""Checks versions of relevant python libraries
Compares specified versions of libraries against the loaded version.
If the values are to low, an error message is returned.
"""
from scipy import __version__ as scipyVersion # Scipy is not fully imported, so only load version
libs = [['numpy', np.__version__, NPVERSION],
['matplotlib', matplotlib.__version__, MPLVERSION],
['scipy', scipyVersion, SPVERSION]]
if sys.version_info.major == 3:
libs.append(['python', str(sys.version_info.major) + '.' + str(sys.version_info.minor), PY3VERSION])
elif sys.version_info.major == 2:
libs.append(['python', str(sys.version_info.major) + '.' + str(sys.version_info.minor), PY2VERSION])
messages = []
error = False
for elem in libs:
check = libVersionChecker(elem[1], elem[2])
if not check:
error = True
messages.append('"' + elem[0] + '" version is too low (need "' + elem[2] + '" have "' + elem[1] +'")')
return error, messages
def popupVersionError(messages):
"""Gives a message window displaying version issues
Input is a list of strings
"""
msg = ""
for elem in messages:
msg = msg + elem + '\n'
reply = QtWidgets.QMessageBox.warning(QtWidgets.QWidget(), 'Invalid software version', msg, QtWidgets.QMessageBox.Ignore, QtWidgets.QMessageBox.Abort)
quit = False
if reply == QtWidgets.QMessageBox.Abort:
quit = True
return quit
def openRefMan():
file = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + '..' + os.path.sep + 'ReferenceManual.pdf'
if sys.platform.startswith('linux'):
os.system("xdg-open " + '"' + file + '"')
elif sys.platform.startswith('darwin'):
os.system("open " + '"' + file + '"')
elif sys.platform.startswith('win'):
os.startfile(file)
def openTutorial():
path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + '..' + os.path.sep + 'Tutorial'
if sys.platform.startswith('linux'):
os.system("xdg-open " + '"' + path + '"')
elif sys.platform.startswith('darwin'):
os.system("open " + '"' + path + '"')
elif sys.platform.startswith('win'):
os.startfile(path)
if __name__ == '__main__':
error, messages = checkVersions()
quit = False
if error:
splash.close()
quit = popupVersionError(messages)
if not quit:
mainProgram = MainProgram(root)
mainProgram.setWindowTitle("ssNake - " + VERSION)
mainProgram.show()
if not error:
splash.finish(mainProgram)
sys._excepthook = sys.excepthook
if len(sys.argv) > 1:
mainProgram.loadData(sys.argv[1:])
def exception_hook(exctype, value, traceback):
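# Exceptions from the processing libraries (SpectrumException, HComplexException,
# SimException) are shown via dispMsg, anything else is recorded via dispError;
# non-Exception values (e.g. KeyboardInterrupt) fall through to the default hook.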
if not isinstance(value, Exception): # Do not catch keyboard interrupts
sys._excepthook(exctype, value, traceback)
elif isinstance(value, (sc.SpectrumException, hc.HComplexException, sim.SimException)):
mainProgram.dispMsg(str(value))
else:
mainProgram.dispError([exctype, value, traceback])
sys.excepthook = exception_hook
sys.exit(root.exec_())
| gpl-3.0 |
deepesch/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
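# A rough sketch of the SGDClassifier variant mentioned in the docstring above
# (hyper-parameters are illustrative, not tuned):
#
#   from sklearn.linear_model import SGDClassifier
#   clf = SGDClassifier(loss="hinge", n_iter=100, alpha=0.01)
#   wclf = SGDClassifier(loss="hinge", n_iter=100, alpha=0.01,
#                        class_weight={1: 10})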
| bsd-3-clause |
yosefm/tracer | examples/tower_gui.py | 1 | 4835 | """
Yet another MayaVi example: a heliostat field. In this example we also use an
embedded Matplotlib figure to show the flux map on request.
"""
import traits.api as t_api
import traitsui.api as tui
from tracer.mayavi_ui.scene_view import TracerScene
import numpy as N
from scipy.constants import degree
from tracer.ray_bundle import RayBundle
from tracer.sources import pillbox_sunshape_directions
from tracer.assembly import Assembly
from tracer.spatial_geometry import roty, rotation_to_z
from tracer.tracer_engine import TracerEngine
from tracer.models.one_sided_mirror import one_sided_receiver
from tracer.models.heliostat_field import HeliostatField, radial_stagger, solar_vector
# For the embedded flux map:
from matplotlib.figure import Figure
from embedded_figure import MPLFigureEditor
import wx
class TowerScene(TracerScene):
# Location of the sun:
sun_az = t_api.Range(0, 180, 90, label="Sun azimuth")
sun_elev = t_api.Range(0, 90, 45, label="Sun elevation")
# Heliostat placement distance:
radial_res = t_api.Float(1., label="Radial distance")
    ang_res = t_api.Float(N.pi/8, label="Angular distance")
# Flux map figure:
fmap = t_api.Instance(Figure)
fmap_btn = t_api.Button(label="Update flux map")
def __init__(self):
self.gen_plant()
TracerScene.__init__(self, self.plant, self.gen_rays())
self.aim_field()
self.set_background((0., 0.5, 1.))
def gen_rays(self):
sun_vec = solar_vector(self.sun_az*degree, self.sun_elev*degree)
rpos = (self.pos + sun_vec).T
direct = N.tile(-sun_vec, (self.pos.shape[0], 1)).T
rays = RayBundle(rpos, direct, energy=N.ones(self.pos.shape[0]))
return rays
def gen_plant(self):
xy = radial_stagger(-N.pi/4, N.pi/4 + 0.0001, self.ang_res, 5, 20, self.radial_res)
self.pos = N.hstack((xy, N.zeros((xy.shape[0], 1))))
self.field = HeliostatField(self.pos, 0.5, 0.5, 0, 10)
self.rec, recobj = one_sided_receiver(1., 1.)
rec_trans = roty(N.pi/2)
rec_trans[2,3] = 10
recobj.set_transform(rec_trans)
self.plant = Assembly(objects=[recobj], subassemblies=[self.field])
@t_api.on_trait_change('sun_az, sun_elev')
def aim_field(self):
self.clear_scene()
rays = self.gen_rays()
self.field.aim_to_sun(self.sun_az*degree, self.sun_elev*degree)
self.set_assembly(self.plant) # Q&D example.
self.set_source(rays)
@t_api.on_trait_change('radial_res, ang_res')
def replace_plant(self):
self.gen_plant()
self.aim_field()
@t_api.on_trait_change('_scene.activated')
    def initialize_camera(self):
self._scene.mlab.view(0, -90)
self._scene.mlab.roll(90)
    def _fmap_btn_fired(self):
        """Generate a flux map using many more rays than are drawn."""
# Generate a large ray bundle using a radial stagger much denser
# than the field.
sun_vec = solar_vector(self.sun_az*degree, self.sun_elev*degree)
hstat_rays = 1000
num_rays = hstat_rays*len(self.field.get_heliostats())
rot_sun = rotation_to_z(-sun_vec)
direct = N.dot(rot_sun, pillbox_sunshape_directions(num_rays, 0.00465))
xy = N.random.uniform(low=-0.25, high=0.25, size=(2, num_rays))
base_pos = N.tile(self.pos, (hstat_rays, 1)).T
base_pos += N.dot(rot_sun[:,:2], xy)
base_pos -= direct
rays = RayBundle(base_pos, direct, energy=N.ones(num_rays))
# Perform the trace:
self.rec.get_optics_manager().reset()
e = TracerEngine(self.plant)
e.ray_tracer(rays, 1000, 0.05)
# Show a histogram of hits:
energy, pts = self.rec.get_optics_manager().get_all_hits()
x, y = self.rec.global_to_local(pts)[:2]
rngx = 0.5
rngy = 0.5
bins = 50
H, xbins, ybins = N.histogram2d(x, y, bins, \
range=([-rngx,rngx], [-rngy,rngy]), weights=energy)
self.fmap.axes[0].images=[]
self.fmap.axes[0].imshow(H, aspect='auto')
wx.CallAfter(self.fmap.canvas.draw)
def _fmap_default(self):
figure = Figure()
figure.add_axes([0.05, 0.04, 0.9, 0.92])
return figure
# Parameters of the form that is shown to the user:
view = tui.View(tui.HGroup(tui.VGroup(
TracerScene.scene_view_item(500, 500),
tui.HGroup('-', 'sun_az', 'sun_elev'),
tui.HGroup('radial_res', 'ang_res'),
tui.Item('fmap_btn', show_label=False)),
tui.Item('fmap', show_label=False, editor=MPLFigureEditor())))
if __name__ == '__main__':
scene = TowerScene()
scene.configure_traits()
| gpl-3.0 |
lisaleemcb/sncosmo_lc_analysis | analyzeSN/snanaio.py | 1 | 5412 | from __future__ import absolute_import, print_function, division
import fitsio
import pandas as pd
import numpy as np
import os
from astropy.table import Table, Column
class SNANASims(object):
"""
Class to represent SNANA simulations of a particular class of objects, ie.
Ia or Non_Ia
"""
def __init__(self, headFile, photFile, coerce_inds2int=True):
"""
Parameters
---------
headFile : string, mandatory
absolute path to head file of simulation
photFile : string, mandatory
absolute path to phot file of simulation
coerce_inds2int : Bool, optional, defaults to True
if true, converts SNID from string to int
"""
self.headFile = headFile
self.photFile = photFile
self.headData = self.get_headData(self.headFile,
coerce_inds2int=coerce_inds2int)
self.phot = fitsio.FITS(photFile)
@classmethod
def fromSNANAfileroot(cls, snanafileroot, location='./',
coerce_inds2int=False):
"""
Class constructor from a root file and a location
Parameters
----------
snanafileroot : string, mandatory
root file name for the SNANA which is the prefix to
'_HEAD.FITS', or '_PHOT.FITS'
location : string, optional defaults to current working directory './'
Relative or absolute path to the directory where the head and phot
files are located
snids : integer/string, optional defaults to None
if not None, only SN observations corresponding to SNID snid
are loaded
n : Integer, defaults to None
if not None, only the first n SN light curves are loaded
"""
headfile = cls.snanadatafile(snanafileroot, filetype='head',
location=location)
photfile = cls.snanadatafile(snanafileroot, filetype='phot',
location=location)
return cls(headFile=headfile, photFile=photfile,
coerce_inds2int=coerce_inds2int)
@staticmethod
def snanadatafile(snanafileroot, filetype='head', location='./'):
'''
obtain the name of the head or phot file of an SNANA simulation
and dataset
Parameters
----------
snanafileroot : string, mandatory
root file name for the SNANA which is the prefix to
'_HEAD.FITS', or '_PHOT.FITS'
filetype : string, optional defaults to 'head'
'head' or 'phot' depending on whether a summary file or a photometry
file is being used.
location : string, optional defaults to current working directory './'
relative or absolute path to the directory in which the file is
located
Returns
-------
string : absolute path to the SNANA file
'''
desiredfiletype = ['head', 'phot']
filetype = filetype.lower()
        if filetype not in desiredfiletype:
raise ValueError(
'filetype should be one of "head" or "phot"', filetype)
location = os.path.abspath(location)
suffix = '_HEAD.FITS'
if filetype.lower() == 'phot':
suffix = '_PHOT.FITS'
fname = snanafileroot + suffix
return os.path.join(location, fname)
@staticmethod
def get_headData(headFile, coerce_inds2int=False):
"""
read the headData of a SNANA simulation and return a dataframe
representing the simulation
Parameters
----------
        headFile : string, absolute path to the head file of the simulation
        coerce_inds2int : Bool, if True coerce the SNID index from string to int
"""
_head = Table.read(headFile)
if _head['SNID'].dtype.type is np.string_:
data = _head['SNID'].data
name = _head['SNID'].name
dtype = _head['SNID'].dtype
if coerce_inds2int:
arr = list(np.int(x) for x in data)
dtype=int
else:
arr = list(x.strip().lower() for x in data)
col = Column(data=arr, name=name, dtype=dtype)
_head.remove_columns('SNID')
_head.add_column(col, index=0)
return _head.to_pandas().set_index('SNID')
def get_photrows(self, row=None, snid=None):
"""
return rows of the photometry table corresponding to a SN as listed
in the head table.
Parameters
----------
        row : int, positional (row) index of the SN in the head table
        snid : int/string, SNID of the SN in the head table
"""
if row is not None:
ptrs = self.headData.iloc[row][['PTROBS_MIN', 'PTROBS_MAX']]
elif snid is not None:
ptrs = self.headData.ix[snid][['PTROBS_MIN', 'PTROBS_MAX']]
else:
            raise ValueError('Both {0} and {1} cannot be None simultaneously'.format('snid', 'row'))
ptrs = ptrs.astype('int').values
ptrs[0] -= 1
return ptrs
def get_SNANA_photometry(self, snid=None, ptrs=None):
"""
return the photometry table corresponding to a SN with snid (from the
head table) or the photometry table within the range of row numbers
indicated by ptrs
"""
if ptrs is not None:
assert np.shape(ptrs) == (2,)
elif snid is not None:
ptrs = self.get_photrows(snid=snid.strip().lower())
else:
            raise ValueError('Both {0} and {1} cannot be None simultaneously'.format('snid', 'row'))
lcData = self.phot[1][ptrs[0]: ptrs[1]].byteswap().newbyteorder()
return pd.DataFrame(lcData)
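# A minimal usage sketch of the class above; the file root 'MYSIM' and the
# location '/path/to/sim' are placeholders, not files shipped with this module:
#
#   sims = SNANASims.fromSNANAfileroot('MYSIM', location='/path/to/sim',
#                                      coerce_inds2int=False)
#   snid = sims.headData.index.values[0]
#   lc = sims.get_SNANA_photometry(snid=snid)  # light curve as a pandas DataFrame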
| mit |
brentjm/Impurity-Predictions | server/package/desiccant.py | 1 | 4140 | """
Desiccant class
Brent Maranzano
2016-04-16
"""
import pandas as pd
class Desiccant(object):
"""
    Define the desiccant inside the container (e.g. type, amount, water content, etc.).
Class Attributes
ID : string - unique identification number to lookup parameters
name : string - Desiccant material.
mass : float - mass of desiccant (g)
water_content : float - mass fraction of water contained in the desiccant
(mass water (mg) / mass dry desiccant (g))
GAB_parameters : dictionary - {Wm, C, K} GAB constants from lookup file.
density: float - density of dry desiccant (g/cm^3)
water : float - total mass of water (g)
Class Methods
set_properties : Set the properties of the desiccant
refresh : Refresh the desiccant to a new water content
equilibrate : Equilibrate the deisccant to a provided RH and set the water content.
calc_water_content: Calculate the water content from GAB and passed water activity
"""
def __init__(self, ID, mass, **kwargs):
self.set_properties(ID, mass, **kwargs)
def set_properties(self, ID, mass, density=1.0, **kwargs):
"""
Set the properties of the desiccant.
Parameters
ID : string - Unique identification of the desiccant for the lookup.
mass : float - Mass of the desiccant.
optional kwargs
water_content : float - mass fraction of water in desiccant
(mass water (mg) / mass dry desiccant (g))
density : float - density of dry desiccant (g)
"""
store = pd.HDFStore("simulation_constants.hdf", mode="r")
GAB_constants = store["GAB_constants"]
store.close()
if ID in GAB_constants.index.values:
self.ID = ID
            self.name = GAB_constants.loc[ID]["name"]
self.GAB_parameters = GAB_constants.loc[ID][["C", "K", "Wm"]].to_dict()
else:
raise ValueError("Desiccant type {} is not defined".format(ID))
self.mass = float(mass)
if "water_content" in kwargs:
self.water_content = float(kwargs["water_content"])
elif "initial_water_activity" in kwargs:
self.water_content = \
self.calc_water_content(kwargs["initial_water_activity"]) * 1.e3
else:
self.water_content = 20.
self.density = float(density)
self.water = self.water_content * self.mass * 1.e-3
def refresh(self, water_content=20., initial_activity=None):
"""
Refresh the desiccant (e.g. replace with equivalent desiccant
mass with lower water content). Specify either new water
content (water_content), or initial water activity of the
desiccant (initial_activity).
Parameters
water_content: float - Water content of the fresh desiccant
(mg water / g dry desiccant)
initial_activity: float - Water activity of the fresh desiccant (unitless)
"""
if initial_activity is None:
self.water_content = float(water_content)
else:
self.water_content = \
self.calc_water_content(float(initial_activity))
self.water = self.water_content * self.mass * 1.e-3
def equilibrate(self, aw):
"""
Equilibrate the desiccant to a new water activity.
Parameters
aw: float - Water activity to equilibrate desiccant.
"""
self.water_content = self.calc_water_content(aw)
self.water = self.water_content * self.mass * 1.e-3
def calc_water_content(self, aw):
"""
Calculate the water content from the GAB parameters at the provided
water activity.
Parameters
aw : float - Water activity
return: water_content (mg water / g desiccant)
"""
aw = float(aw)
Wm = self.GAB_parameters["Wm"]
C = self.GAB_parameters["C"]
K = self.GAB_parameters["K"]
water_content = (Wm*C*K*aw) / ((1-K*aw) * (1-K*aw+C*K*aw)) * 1.e3
return water_content
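# Worked example of the GAB expression above (numbers are illustrative only):
# with Wm=0.2, C=10, K=0.75 and a water activity aw=0.5,
#   W = (0.2*10*0.75*0.5) / ((1 - 0.375) * (1 - 0.375 + 3.75))
#     = 0.75 / (0.625 * 4.375) ~= 0.274 g water / g dry desiccant,
# which calc_water_content returns as ~274 after the 1.e3 scaling to mg/g.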
| bsd-2-clause |
thesuperzapper/tensorflow | tensorflow/contrib/keras/python/keras/callbacks.py | 25 | 33691 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import Iterable
from collections import OrderedDict
import csv
import json
import os
import time
import warnings
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.python.ops import array_ops
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver as saver_lib
# pylint: disable=g-import-not-at-top
try:
import requests
except ImportError:
requests = None
# pylint: enable=g-import-not-at-top
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
for callback in self.callbacks:
callback.set_model(model)
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs=None):
"""Called right before processing a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
warnings.warn(
'Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs=None):
"""Called at the end of a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
if not hasattr(self, '_t_enter_batch'):
self._t_enter_batch = time.time()
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if (self._delta_t_batch > 0. and
(delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
warnings.warn(
'Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' % delta_t_median)
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Called at the end of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_end(logs)
def __iter__(self):
return iter(self.callbacks)
class Callback(object):
"""Abstract base class used to build new callbacks.
# Properties
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Sequential` model class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
"""
def __init__(self):
self.validation_data = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
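# A minimal sketch of a user-defined callback built on the base class above
# (illustrative only; the available `logs` keys depend on the metrics
# configured on the model being trained):
#
#   class LossPrinter(Callback):
#
#     def on_epoch_end(self, epoch, logs=None):
#       logs = logs or {}
#       print('epoch %d: loss=%s' % (epoch, logs.get('loss')))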
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
"""
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
logs[k] = self.totals[k] / self.seen
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Arguments:
count_mode: One of "steps" or "samples".
Whether the progress bar should
count samples seens or steps (batches) seen.
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples'):
super(ProgbarLogger, self).__init__()
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
def on_epoch_begin(self, epoch, logs=None):
if self.verbose:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
if self.use_steps:
target = self.params['steps']
else:
target = self.params['samples']
self.target = target
self.progbar = Progbar(target=self.target, verbose=self.verbose)
self.seen = 0
def on_batch_begin(self, batch, logs=None):
if self.seen < self.target:
self.log_values = []
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
if self.use_steps:
self.seen += 1
else:
self.seen += batch_size
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.verbose and self.seen < self.target:
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values, force=True)
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
"""
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
which will be filled the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn('Can save best model only with %s available, '
'skipping.' % (self.monitor), RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('Epoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('Epoch %05d: %s did not improve' % (epoch, self.monitor))
else:
if self.verbose > 0:
print('Epoch %05d: saving model to %s' % (epoch, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
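# A short usage sketch for ModelCheckpoint; the filepath pattern comes from the
# docstring above, while `model` and the training arrays are placeholders:
#
#   checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5',
#                                monitor='val_loss', save_best_only=True)
#   model.fit(x_train, y_train, validation_data=(x_val, y_val),
#             callbacks=[checkpoint])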
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: quantity to be monitored.
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto'):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.min_delta = min_delta
self.wait = 0
self.stopped_epoch = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn('EarlyStopping mode %s is unknown, '
'fallback to auto mode.' % (self.mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
self.wait = 0 # Allow instances to be re-used
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get(self.monitor)
if current is None:
      warnings.warn('Early stopping requires %s available!' % (self.monitor),
                    RuntimeWarning)
      return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
self.wait += 1
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch))
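# A short usage sketch for EarlyStopping (placeholder model and data names;
# the min_delta/patience values are illustrative):
#
#   early_stopping = EarlyStopping(monitor='val_loss', min_delta=1e-3,
#                                  patience=3)
#   model.fit(x_train, y_train, validation_data=(x_val, y_val),
#             callbacks=[early_stopping])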
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
Arguments:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
headers: Dictionary; optional custom HTTP headers.
Defaults to:
`{'Accept': 'application/json',
'Content-Type': 'application/json'}`
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None):
super(RemoteMonitor, self).__init__()
if headers is None:
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
self.root = root
self.path = path
self.field = field
self.headers = headers
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires ' 'the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
send[k] = v
try:
requests.post(
self.root + self.path, {self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
warnings.warn('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
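# For reference, the POST issued in on_epoch_end above is roughly equivalent to
# the following (default root, path, field and headers; example payload only):
#
#   requests.post('http://localhost:9000/publish/epoch/end/',
#                 {'data': json.dumps({'epoch': 0, 'loss': 0.25})},
#                 headers={'Accept': 'application/json',
#                          'Content-Type': 'application/json'})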
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
Arguments:
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new
learning rate as output (float).
"""
def __init__(self, schedule):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
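# A short sketch of a schedule function usable with the callback above (the
# base rate and decay constants are illustrative):
#
#   def step_decay(epoch):
#     return 0.01 * (0.5 ** (epoch // 10))  # halve the rate every 10 epochs
#
#   model.fit(x_train, y_train, callbacks=[LearningRateScheduler(step_decay)])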
class TensorBoard(Callback):
# pylint: disable=line-too-long
"""Tensorboard basic visualizations.
This callback writes a log for TensorBoard, which allows
you to visualize dynamic graphs of your training and test
metrics, as well as activation histograms for the different
layers in your model.
Arguments:
log_dir: the path of the directory where to save the log
files to be parsed by Tensorboard.
histogram_freq: frequency (in epochs) at which to compute activation
histograms for the layers of the model. If set to 0,
histograms won't be computed.
write_graph: whether to visualize the graph in Tensorboard.
The log file can become quite large when
write_graph is set to True.
write_images: whether to write model weights to visualize as
image in Tensorboard.
embeddings_freq: frequency (in epochs) at which selected embedding
layers will be saved.
embeddings_layer_names: a list of names of layers to keep eye on. If
None or empty list all the embedding layer will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
in which metadata for this embedding layer is saved. See the
[details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='./logs',
histogram_freq=0,
write_graph=True,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.merged = None
self.write_graph = write_graph
self.write_images = write_images
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata or {}
def set_model(self, model):
self.model = model
self.sess = K.get_session()
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
tf_summary.histogram(weight.name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = w_img.get_shape()
if len(shape) > 1 and shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
if len(shape) == 1:
w_img = array_ops.expand_dims(w_img, 0)
w_img = array_ops.expand_dims(array_ops.expand_dims(w_img, 0), -1)
tf_summary.image(weight.name, w_img)
if hasattr(layer, 'output'):
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
self.merged = tf_summary.merge_all()
if self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
else:
self.writer = tf_summary.FileWriter(self.log_dir)
if self.embeddings_freq:
self.saver = saver_lib.Saver()
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
embeddings = {
layer.name: layer.weights[0]
for layer in self.model.layers if layer.name in embeddings_layer_names
}
embeddings_metadata = {}
if not isinstance(self.embeddings_metadata, str):
embeddings_metadata = self.embeddings_metadata
else:
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings.keys()
}
config = projector.ProjectorConfig()
self.embeddings_logs = []
for layer_name, tensor in embeddings.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
self.embeddings_logs.append(
os.path.join(self.log_dir, layer_name + '.ckpt'))
if layer_name in embeddings_metadata:
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.validation_data and self.histogram_freq:
if epoch % self.histogram_freq == 0:
# TODO(fchollet): implement batched calls to sess.run
# (current call will likely go OOM on GPU)
if self.model.uses_learning_phase:
cut_v_data = len(self.model.inputs)
val_data = self.validation_data[:cut_v_data] + [0]
tensors = self.model.inputs + [K.learning_phase()]
else:
val_data = self.validation_data
tensors = self.model.inputs
feed_dict = dict(zip(tensors, val_data))
result = self.sess.run([self.merged], feed_dict=feed_dict)
summary_str = result[0]
self.writer.add_summary(summary_str, epoch)
if self.embeddings_freq and self.embeddings_logs:
if epoch % self.embeddings_freq == 0:
for log in self.embeddings_logs:
self.saver.save(self.sess, log, epoch)
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.writer.add_summary(summary, epoch)
self.writer.flush()
def on_train_end(self, _):
self.writer.close()
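# A short usage sketch for TensorBoard; the log directory is a placeholder and
# summaries are viewed separately with `tensorboard --logdir=./logs`:
#
#   tb = TensorBoard(log_dir='./logs', histogram_freq=1, write_graph=True)
#   model.fit(x_train, y_train, validation_data=(x_val, y_val),
#             callbacks=[tb])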
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Arguments:
monitor: quantity to be monitored.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs with no improvement
after which learning rate will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
epsilon: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
epsilon=1e-4,
cooldown=0,
min_lr=0):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
self.factor = factor
self.min_lr = min_lr
self.epsilon = epsilon
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode), RuntimeWarning)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
self.lr_epsilon = self.min_lr * 1e-4
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
self.monitor, RuntimeWarning)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr + self.lr_epsilon:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate to %s.' % (epoch,
new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
self.wait += 1
def in_cooldown(self):
return self.cooldown_counter > 0
class CSVLogger(Callback):
"""Callback that streams epoch results to a csv file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Arguments:
filename: filename of the csv file, e.g. 'run/log.csv'.
separator: string used to separate elements in the csv file.
append: True: append if file exists (useful for continuing
training). False: overwrite existing file,
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if os.path.exists(self.filename):
with open(self.filename) as f:
self.append_header = not bool(len(f.readline()))
self.csv_file = open(self.filename, 'a')
else:
self.csv_file = open(self.filename, 'w')
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if not self.writer:
self.keys = sorted(logs.keys())
class CustomDialect(csv.excel):
delimiter = self.sep
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=['epoch'] + self.keys,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
class LambdaCallback(Callback):
"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time. Note that the callbacks expects positional
arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
Arguments:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Plot the loss after every epoch.
import numpy as np
import matplotlib.pyplot as plt
plot_loss_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: plt.plot(np.arange(epoch),
logs['loss']))
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
plot_loss_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
| apache-2.0 |
dongjoon-hyun/neon | examples/fast-rcnn/demo.py | 2 | 5520 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Demo a trained Fast-RCNN model to do object detection using PASCAL VOC dataset.
This demo currently runs 1 image at a time.
Reference:
"Fast R-CNN"
http://arxiv.org/pdf/1504.08083v2.pdf
https://github.com/rbgirshick/fast-rcnn
Usage:
python examples/fast-rcnn/demo.py --model_file frcn_vgg.pkl
Notes:
    1. For the VGG16-based Fast R-CNN model, testing is supported with a batch size of
       1 image. The testing consumes about 7 GB of memory.
2. During demo, all the selective search ROIs will be used to go through the network,
so the inference time varies based on how many ROIs in each image.
For PASCAL VOC 2007, the average number of SelectiveSearch ROIs is around 2000.
3. The dataset will cache the preprocessed file and re-use that if the same
configuration of the dataset is used again. The cached file by default is in
~/nervana/data/VOCDevkit/VOC<year>/train_< >.pkl or
~/nervana/data/VOCDevkit/VOC<year>/inference_< >.pkl
"""
import os
import numpy as np
from PIL import Image
from neon.data.pascal_voc import PASCAL_VOC_CLASSES
from neon.data import PASCALVOCInference
from neon.util.argparser import NeonArgparser
from util import create_frcn_model
do_plots = True
try:
import matplotlib.pyplot as plt
plt.switch_backend('agg')
except ImportError:
print('matplotlib needs to be installed manually to generate plots needed '
'for this example. Skipping plot generation')
do_plots = False
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--img_prefix', type=str,
help='prefix for the saved image file names. If None, use '
'the model file name')
args = parser.parse_args(gen_be=True)
assert args.model_file is not None, "need a model file to do Fast R-CNN testing"
if args.img_prefix is None:
args.img_prefix = os.path.splitext(os.path.basename(args.model_file))[0]
output_dir = os.path.join(args.data_dir, 'frcn_output')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# hyperparameters
args.batch_size = 1
n_mb = 40
img_per_batch = args.batch_size
rois_per_img = 5403
# setup dataset
image_set = 'test'
image_year = '2007'
valid_set = PASCALVOCInference(image_set, image_year, path=args.data_dir, n_mb=n_mb,
rois_per_img=rois_per_img, shuffle=False)
# setup model
model = create_frcn_model()
model.load_params(args.model_file)
model.initialize(dataset=valid_set)
CONF_THRESH = 0.8
NMS_THRESH = 0.3
# iterate through minibatches of the dataset
for mb_idx, (x, db) in enumerate(valid_set):
im = np.array(Image.open(db['img_file'])) # This is RGB order
print db['img_id']
outputs = model.fprop(x, inference=True)
scores, boxes = valid_set.post_processing(outputs, db)
# Visualize detections for each class
if do_plots:
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for cls in PASCAL_VOC_CLASSES[1:]:
        # pick out scores and bboxes related to this class
cls_ind = PASCAL_VOC_CLASSES.index(cls)
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[cls_ind]
        # only keep the ones with high enough scores
keep = np.where(cls_scores >= CONF_THRESH)[0]
if len(keep) == 0:
continue
# with these, do nonmaximum suppression
cls_boxes = cls_boxes[keep]
cls_scores = cls_scores[keep]
keep = valid_set.nonmaximum_suppression(cls_boxes, cls_scores, NMS_THRESH)
# keep these after nms
cls_boxes = cls_boxes[keep]
cls_scores = cls_scores[keep]
# Draw detected bounding boxes
inds = np.where(cls_scores >= CONF_THRESH)[0]
if len(inds) == 0:
continue
print 'detect {}'.format(cls)
if do_plots:
for i in inds:
bbox = cls_boxes[i]
score = cls_scores[i]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(cls, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
plt.axis('off')
plt.tight_layout()
if do_plots:
fname = os.path.join(output_dir, '{}_{}_{}_{}.png'.format(
args.img_prefix, image_set,
image_year, db['img_id']))
plt.savefig(fname)
plt.close()
| apache-2.0 |
HIPS/pgmult | examples/multinomial_gp.py | 1 | 8439 | """
1D GP with multinomial observations
"""
import os
import time
from collections import namedtuple
import numpy as np
from GPy.kern import RBF
# np.random.seed(1122122122)
from pgmult.gp import MultinomialGP, LogisticNormalGP, EmpiricalStickBreakingGPModel
from pgmult.utils import psi_to_pi, compute_uniform_mean_psi
import matplotlib.pyplot as plt
import brewer2mpl
colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
K = 4 # Size of output observations
N_max = 10 # Number of observations per input
def initialize_test(N_max=10, true_model_class=MultinomialGP):
D = 1 # Input dimensionality
M_train = 100 # Number of observed training datapoints
M_test = 20 # Number of observed test datapoints
M = M_train + M_test
l = 10.0 # Length scale of GP
L = 120.0 # Length of observation sequence
v = 1.0 # Variance of the GP
# Initialize a grid of points at which to observe GP
N = N_max * np.ones(M, dtype=np.int32)
Z = np.linspace(0,L,M)[:,None]
# Initialize the kernel
kernel = RBF(1, lengthscale=l, variance=v)
# Sample a GP
true_model = true_model_class(K, kernel, D=D)
X, psi = true_model.generate(Z=Z, N=N, full_output=True)
pi = np.array([psi_to_pi(p) for p in psi])
# Split the data into training and test
Dataset = namedtuple("Dataset", ["K", "kernel", "Z", "X", "psi", "pi"])
train = Dataset(K, kernel, Z[:M_train], X[:M_train], psi[:M_train], pi[:M_train])
test = Dataset(K, kernel, Z[M_train:], X[M_train:], psi[M_train:], pi[M_train:])
return train, test
def initialize_interactive_plot(model, train, test):
plot_K = isinstance(model, LogisticNormalGP)
# Make predictions at the training and testing data
pi_train, psi_train, _ = \
model.collapsed_predict(train.Z)
pi_test, psi_test, _ = \
model.collapsed_predict(test.Z)
lim = 5
# PLOT!
plt.ion()
fig, axs = plt.subplots(train.K, 2)
lns = np.zeros((train.K,4), dtype=object)
for k in range(train.K):
if k == train.K-1 and not plot_K:
pass
else:
ax = axs[k,0]
# Plot the training data
ax.plot(train.Z, train.psi[:,k], '-b', lw=2)
lns[k,0] = ax.plot(train.Z, psi_train[:,k], '--b', lw=2)[0]
# Plot the testing data
ax.plot(test.Z, test.psi[:,k], '-r', lw=2)
lns[k,1] = ax.plot(test.Z, psi_test[:,k], '--r', lw=2)[0]
# Plot the zero line
ax.plot(train.Z, np.zeros_like(train.Z), ':k')
ax.plot(test.Z, np.zeros_like(test.Z), ':k')
# ax.set_xlim(0, L)
ax.set_ylim(-lim, lim)
ax.set_title("$\psi_%d$" % (k+1))
ax = axs[k,1]
pi_emp_train = train.X / train.X.sum(axis=1).astype(np.float)[:,None]
ax.bar(train.Z, pi_emp_train[:,k], width=1, color='k')
ax.plot(train.Z, train.pi[:,k], '-b', lw=2)
lns[k,2] = ax.plot(train.Z, pi_train[:,k], '--b', lw=2)[0]
pi_emp_test = test.X / test.X.sum(axis=1).astype(np.float)[:,None]
ax.bar(test.Z, pi_emp_test[:,k], width=1, color='k')
ax.plot(test.Z, test.pi[:,k], '-r', lw=2)
lns[k,3] = ax.plot(test.Z, pi_test[:,k], '--r', lw=2)[0]
# ax.set_xlim(0,)
ax.set_ylim(0,1)
ax.set_title("$\pi_%d$" % (k+1))
plt.show()
plt.pause(1.0)
return lns
def update_plot(lns, model, train, test):
plot_K = isinstance(model, LogisticNormalGP)
# Make predictions at the training and testing data
pi_train, psi_train, _ = \
model.collapsed_predict(train.Z)
pi_test, psi_test, _ = \
model.collapsed_predict(test.Z)
for k in range(K):
if k == K-1 and not plot_K:
pass
else:
lns[k,0].set_data(train.Z, psi_train[:,k])
lns[k,1].set_data(test.Z, psi_test[:,k])
lns[k,2].set_data(train.Z, pi_train[:,k])
lns[k,3].set_data(test.Z, pi_test[:,k])
plt.pause(0.001)
### Inference
Results = namedtuple("Results", ["lls", "pred_lls", "pred_pis", "pred_psis", "timestamps"])
def fit_model(model, train_data, test_data, N_iter=100, lns=None):
if isinstance(model, EmpiricalStickBreakingGPModel):
return fit_empirical_model(model, train_data, test_data)
lls = [model.log_likelihood()]
pred_lls = [model.predictive_log_likelihood(test_data.Z, test_data.X)[0]]
pred_pi, pred_psi, _ = model.collapsed_predict(test_data.Z)
pred_pis = [pred_pi]
pred_psis = [pred_psi]
timestamps = [time.clock()]
for itr in range(N_iter):
print("Iteration ", itr)
model.resample_model()
# Collect samples
lls.append(model.log_likelihood())
pred_lls.append(model.predictive_log_likelihood(test_data.Z, test_data.X)[0])
pred_pi, pred_psi, _ = model.collapsed_predict(test_data.Z)
pred_pis.append(pred_pi)
pred_psis.append(pred_psi)
timestamps.append(time.clock())
# Update plots
if lns is not None:
update_plot(lns, model, train_data, test_data)
# Compute sample mean and std
lls = np.array(lls)
pred_lls = np.array(pred_lls)
pred_pis = np.array(pred_pis)
pred_psis = np.array(pred_psis)
timestamps = np.array(timestamps)
timestamps -= timestamps[0]
return Results(lls, pred_lls, pred_pis, pred_psis, timestamps)
def fit_empirical_model(model, train, test):
empirical_ll, _ = model.predictive_log_likelihood(train.Z, train.X)
empirical_pred_ll, _ = model.predictive_log_likelihood(test.Z, test.X)
pred_pi, pred_psi, _ = model.collapsed_predict(test.Z)
pred_pis = np.array([pred_pi])
pred_psis = np.array([pred_psi])
# return Results(empirical_ll * np.ones(2),
# empirical_pred_ll * np.ones(2), [0, 1])
return Results(empirical_ll * np.ones(2),
empirical_pred_ll * np.ones(2),
pred_pis, pred_psis, [0,1])
if __name__ == "__main__":
train, test = initialize_test(N_max=N_max, true_model_class=MultinomialGP)
# models = [EmpiricalStickBreakingGPModel, LogisticNormalGP, MultinomialGP]
# labels = ["Emp GP", "LN GP", "LSB GP"]
models = [EmpiricalStickBreakingGPModel, MultinomialGP]
labels = ["Emp GP", "LSB GP"]
results = []
do_plot = False
N_samples = 200
for model_class in models:
# Make a test model
model = model_class(train.K, train.kernel, D=1)
model.add_data(train.Z, train.X)
# Initialize from the data
model.initialize_from_data(initialize_to_mle=True)
if isinstance(model, MultinomialGP):
model.data_list[0]["psi"] = train.psi
model.resample_omega()
# Initialize plots
if do_plot:
lns = initialize_interactive_plot(model, train, test)
else:
lns = None
# Inference
res = fit_model(model, train, test,
N_iter=N_samples, lns=lns)
results.append(res)
fig, axs = plt.subplots(1,2)
T_max = np.amax([np.amax(res.timestamps) for res in results])
for ind, (res, label) in \
enumerate(zip(results, labels)):
axs[0].plot(res.timestamps, res.lls, color=colors[ind], label=label)
axs[0].plot([0, T_max],
res.lls[len(res.lls)//2:].mean() * np.ones(2),
linestyle=":",
color=colors[ind])
offset = len(res.pred_lls)//2
axs[1].plot(res.timestamps, res.pred_lls, color=colors[ind], label=label)
axs[1].plot([0, T_max],
res.pred_lls[offset:].mean() * np.ones(2),
linestyle=":",
color=colors[ind])
# Plot the log-sum-exp of the pred_lls
from scipy.misc import logsumexp
expected_pred_ll = logsumexp(res.pred_lls[offset:]) - np.log(len(res.pred_lls)-offset)
axs[1].plot([0, T_max],
expected_pred_ll * np.ones(2),
linestyle="--",
color=colors[ind])
axs[0].set_xlim(-1, T_max)
axs[1].set_xlim(-1, T_max)
plt.legend(loc="lower right")
axs[0].set_xlabel("Time (s)")
axs[0].set_ylabel("Log likelihood")
axs[1].set_xlabel("Time (s)")
axs[1].set_ylabel("Pred. Log likelihood")
plt.ioff()
plt.show()
| mit |
rrahn/gdf_tools | include/seqan/apps/ngs_roi/tool_shed/roi_details.py | 18 | 3825 | #!/usr/bin/env python
"""Generation of detailed ROI reports with larger plots.
This report generation works for hundred of ROIs.
"""
try:
import argparse
except ImportError:
import argparse26 as argparse
import math
import os.path
import sys
import Cheetah.Template
import matplotlib.pyplot as plt
import ngs_roi.app
import ngs_roi.argparse
import ngs_roi.io
PAGE_TPL = """
<html>
<head>
<title>ROI Table</title>
<style type="text/css">
div.plot
{
float: left;
padding: 4px;
margin: 2px;
width: 420px;
}
.plot h2 { margin-top: 3px; margin-bottom: 3px; text-align: center; }
.plot img { display: block; margin: 0 auto; }
</style>
</head>
<body>
<h1>Detailed ROI Report</h1>
#for i, roi in enumerate($records)
<div class="plot">
<h2>${roi.ref}:${roi.start_pos + 1}-${roi.end_pos+1}</h2>
<a href="${href($roi)}" target="dead"><img src="plot_${i}.png" /></a>
<p>
<b>chr:start-end</b> <a href="${href($roi)}" target="dead">${roi.ref}:${roi.start_pos}-${roi.end_pos} ${roi.strand}</a>;
<b>region name</b> ${roi.region_name};
<b>region length</b> ${roi.region_length};
</p>
#if $roi.data
<p>#for j, key in enumerate($data_keys)#<b>$key:</b> ${roi.data[$j]}; #end for#</p>
#end if
</div>
#end for
<iframe name="dead" height="0" width="0"></iframe>
<div><code>$args</code></div>
</body>
</html>
"""
class DetailedRoiGenerator(ngs_roi.app.App):
"""Generate detailed ROI report.
    :ivar args: Arguments from the command line.
"""
def __init__(self, args):
self.args = args
def run(self):
"""Run report generation, return status code.
:return: integer with the result.
"""
print >>sys.stderr, 'Loading ROI'
records = ngs_roi.io.load(self.args.in_file, self.args.max_rois)
keys = records[0].data_keys
self.writeHtml(keys, records)
self.writePlots(records)
return 0
def writePlots(self, records):
COLOR = 'blue'
LINE_WIDTH = .5
LINE_STYLE = '-'
TICK_FONT_SIZE = 8
LABEL_FONT_SIZE = 10
for i, roi in enumerate(records):
file_name = 'plot_%d.png' % i
file_name = os.path.join(self.args.out_dir, file_name)
print >>sys.stderr, 'Writing plot %s' % file_name
plt.figure(figsize=(4, 2.5))
plt.gcf().subplots_adjust(bottom=0.16, left=0.15)
plt.plot(roi.points, color=COLOR, linewidth=LINE_WIDTH, linestyle=LINE_STYLE)
plt.ylim(ymin=0)
if self.args.max_value:
plt.ylim(ymax=self.args.max_value)
plt.tick_params(labelsize=TICK_FONT_SIZE)
plt.ylabel('coverage', fontsize=LABEL_FONT_SIZE, weight='semibold')
plt.xlabel('ROI beginPos', fontsize=LABEL_FONT_SIZE, weight='semibold')
plt.savefig(file_name)
def writeHtml(self, keys, records):
file_name = self.args.out_file
print >>sys.stderr, 'Writing HTML file %s' % file_name
vals = {'args': self.args, 'records': records, 'data_keys': keys,
'href': lambda x: self.buildHref(x.ref, x.start_pos, x.end_pos)}
t = Cheetah.Template.Template(PAGE_TPL, searchList=vals)
with open(file_name, 'wb') as f:
f.write(str(t))
def main():
parser = argparse.ArgumentParser(description='Plot ROI file.')
ngs_roi.argparse.addFileArguments(parser)
ngs_roi.argparse.addPlotGridArguments(parser)
ngs_roi.argparse.addLinkArguments(parser)
args = parser.parse_args()
ngs_roi.argparse.applyFileDefaults(args)
app = DetailedRoiGenerator(args)
return app.run()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
lin-credible/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs need cblas; fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/plotting/test_hist_method.py | 1 | 15855 | # coding: utf-8
""" Test cases for .hist method """
import pytest
from pandas import Series, DataFrame
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import numpy as np
from numpy.random import randn
from pandas.plotting._core import grouped_hist
from pandas.plotting._compat import _mpl_ge_2_2_0
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works)
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
@pytest.mark.slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist, by=self.ts.index.month)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with pytest.raises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@pytest.mark.slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
@pytest.mark.slow
def test_hist_layout(self):
df = self.hist_df
with pytest.raises(ValueError):
df.height.hist(layout=(1, 1))
with pytest.raises(ValueError):
df.height.hist(layout=[1, 1])
@pytest.mark.slow
def test_hist_layout_with_by(self):
df = self.hist_df
# _check_plot_works adds an `ax` kwarg to the method call
# so we get a warning about an axis being cleared, even
# though we don't explicitly pass one, see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.gender,
layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.gender,
layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.category,
layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(
axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
@pytest.mark.slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
assert len(axes) == 2
@pytest.mark.slow
def test_hist_by_no_extra_plots(self):
df = self.hist_df
axes = df.height.hist(by=df.gender) # noqa
assert len(self.plt.get_fignums()) == 1
@pytest.mark.slow
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with pytest.raises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@pytest.mark.slow
def test_hist_df_legacy(self):
from matplotlib.patches import Rectangle
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.hist_df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 3))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, grid=False)
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert not axes[1, 1].get_visible()
df = DataFrame(randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 6))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, layout=(4, 2))
self._check_axes_shape(axes, axes_num=6, layout=(4, 2))
# make sure sharex, sharey is handled
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, figsize=(8, 10))
# check bins argument
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, bins=5)
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
tm.close()
# make sure kwargs to hist are handled
if _mpl_ge_2_2_0():
kwargs = {"density": True}
else:
kwargs = {"normed": True}
ax = ser.hist(cumulative=True, bins=4, **kwargs)
# height of the last bin (the cumulative total) must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self._check_ax_scales(ax, yaxis='log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with pytest.raises(AttributeError):
ser.hist(foo='bar')
@pytest.mark.slow
def test_hist_layout(self):
df = DataFrame(randn(100, 3))
layout_to_expected_size = (
{'layout': None, 'expected_size': (2, 2)}, # default is 2x2
{'layout': (2, 2), 'expected_size': (2, 2)},
{'layout': (4, 1), 'expected_size': (4, 1)},
{'layout': (1, 4), 'expected_size': (1, 4)},
{'layout': (3, 3), 'expected_size': (3, 3)},
{'layout': (-1, 4), 'expected_size': (1, 4)},
{'layout': (4, -1), 'expected_size': (4, 1)},
{'layout': (-1, 2), 'expected_size': (2, 2)},
{'layout': (2, -1), 'expected_size': (2, 2)}
)
for layout_test in layout_to_expected_size:
axes = df.hist(layout=layout_test['layout'])
expected = layout_test['expected_size']
self._check_axes_shape(axes, axes_num=3, layout=expected)
# layout too small for all 4 plots
with pytest.raises(ValueError):
df.hist(layout=(1, 1))
# invalid format for layout
with pytest.raises(ValueError):
df.hist(layout=(1,))
with pytest.raises(ValueError):
df.hist(layout=(-1, -1))
@pytest.mark.slow
# GH 9351
def test_tight_layout(self):
if self.mpl_ge_2_0_1:
df = DataFrame(randn(100, 3))
_check_plot_works(df.hist)
self.plt.tight_layout()
tm.close()
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.slow
def test_grouped_hist_legacy(self):
from matplotlib.patches import Rectangle
df = DataFrame(randn(500, 2), columns=['A', 'B'])
df['C'] = np.random.randint(0, 4, 500)
df['D'] = ['X'] * 500
axes = grouped_hist(df.A, by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
axes = df.hist(by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
# group by a key with single value
axes = df.hist(by='D', rot=30)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self._check_ticks_props(axes, xrot=30)
tm.close()
# make sure kwargs to hist are handled
xf, yf = 20, 18
xrot, yrot = 30, 40
if _mpl_ge_2_2_0():
kwargs = {"density": True}
else:
kwargs = {"normed": True}
axes = grouped_hist(df.A, by=df.C, cumulative=True,
bins=4, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot, **kwargs)
# height of the last bin (the cumulative total) must be 1.0
for ax in axes.ravel():
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
height = rects[-1].get_height()
tm.assert_almost_equal(height, 1.0)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
tm.close()
axes = grouped_hist(df.A, by=df.C, log=True)
# scale of y must be 'log'
self._check_ax_scales(axes, yaxis='log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with pytest.raises(AttributeError):
grouped_hist(df.A, by=df.C, foo='bar')
with tm.assert_produces_warning(FutureWarning):
df.hist(by='C', figsize='default')
@pytest.mark.slow
def test_grouped_hist_legacy2(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender_int = np.random.choice([0, 1], size=n)
df_int = DataFrame({'height': height, 'weight': weight,
'gender': gender_int})
gb = df_int.groupby('gender')
axes = gb.hist()
assert len(axes) == 2
assert len(self.plt.get_fignums()) == 2
tm.close()
@pytest.mark.slow
def test_grouped_hist_layout(self):
df = self.hist_df
pytest.raises(ValueError, df.hist, column='weight', by=df.gender,
layout=(1, 1))
pytest.raises(ValueError, df.hist, column='height', by=df.category,
layout=(1, 3))
pytest.raises(ValueError, df.hist, column='height', by=df.category,
layout=(-1, -1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, column='height', by=df.gender,
layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, column='height', by=df.gender,
layout=(2, -1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = df.hist(column='height', by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column='height', by=df.category, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column='height', by=df.category,
layout=(4, 2), figsize=(12, 8))
self._check_axes_shape(
axes, axes_num=4, layout=(4, 2), figsize=(12, 8))
tm.close()
# GH 6769
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.hist, column='height', by='classroom', layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
# without column
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, by='classroom')
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.hist(by='gender', layout=(3, 5))
self._check_axes_shape(axes, axes_num=2, layout=(3, 5))
axes = df.hist(column=['height', 'weight', 'category'])
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
@pytest.mark.slow
def test_grouped_hist_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
fig, axes = self.plt.subplots(2, 3)
returned = df.hist(column=['height', 'weight', 'category'], ax=axes[0])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[0])
assert returned[0].figure is fig
returned = df.hist(by='classroom', ax=axes[1])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[1])
assert returned[0].figure is fig
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
axes = df.hist(column='height', ax=axes)
@pytest.mark.slow
def test_axis_share_x(self):
df = self.hist_df
# GH4089
ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True)
# share x
assert ax1._shared_x_axes.joined(ax1, ax2)
assert ax2._shared_x_axes.joined(ax1, ax2)
# don't share y
assert not ax1._shared_y_axes.joined(ax1, ax2)
assert not ax2._shared_y_axes.joined(ax1, ax2)
@pytest.mark.slow
def test_axis_share_y(self):
df = self.hist_df
ax1, ax2 = df.hist(column='height', by=df.gender, sharey=True)
# share y
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
# don't share x
assert not ax1._shared_x_axes.joined(ax1, ax2)
assert not ax2._shared_x_axes.joined(ax1, ax2)
@pytest.mark.slow
def test_axis_share_xy(self):
df = self.hist_df
ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True,
sharey=True)
# share both x and y
assert ax1._shared_x_axes.joined(ax1, ax2)
assert ax2._shared_x_axes.joined(ax1, ax2)
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
| bsd-3-clause |
niasand/cool-config | pandas_learn/dataframe_learn.py | 1 | 1262 | #coding: utf-8
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
from log_lib import log
def frame():
data = {'state':['hubei','sichuan','guangzhou'],'year':[2015,2016,2017],'pop':[6,7,8]}
frame = DataFrame(data)
log.notice(frame)
log.notice(DataFrame(data,columns=['year','state','pop','debt']))
log.notice(frame.columns)
log.notice(frame['state'])
log.notice(frame['pop'])
log.notice(frame['year'])
frame['debt'] = 16.5
log.notice(frame)
frame['debt'] = np.arange(3.)
log.notice(frame)
frame2 = DataFrame(data,columns=['year','state','pop','debt'],index=['one','two','three'])
log.notice(frame2)
val = Series([-1.2,-1.3,-1.4],index=['two','three','one'])
frame2['debt'] = val
log.notice(frame2)
frame2['eastern'] = frame2.state == 'hubei'
log.notice(frame2)
log.notice(frame2.index)
def frame3():
pop = {'hubei':{2001:2.4,2002:2.5}, "guangdong":{2000:2.6,2001:2.7}}
frame3 = DataFrame(pop)
log.notice(frame3)
log.notice(frame3.T)
log.notice(DataFrame(pop,index=[2001,2000,2002]))
pdata = {'hubei':frame3['hubei'][:-2],'guangdong':frame3['guangdong'][:-2]}
log.notice(pdata)
if __name__ == '__main__':
frame3()
| mit |
google-research/google-research | caltrain/glm_modeling/analysis.py | 1 | 13115 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GLM modeling main analysis file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import json
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from six.moves import range
from six.moves import zip
from caltrain.glm_modeling import beta_fit_data
from caltrain.glm_modeling import Datasets
from caltrain.glm_modeling import ece_comparison_dir
from caltrain.glm_modeling import Folds
from caltrain.glm_modeling import glm_fit_data
from caltrain.glm_modeling import GLMModels
from caltrain.glm_modeling import Guo_et_al_data
from caltrain.run_calibration import calibrate
def get_glm_fit_data(n=50):
"""Compute GLM fit across empirical datasets."""
col_dict = collections.defaultdict(list)
for dataset_name, dataset_dict in list(Datasets.items()):
dataset = dataset_dict[Folds.test]
for glm_name, glm_model in list(GLMModels.items()):
print((dataset_name, glm_name))
row_idx = pd.MultiIndex.from_tuples([(dataset_name, glm_name)],
names=('dataset_name', 'glm_name'))
curr_glm_fit_data = dataset.fit_glm_bootstrap(glm_model, n=n)
for metric_name, metric_dict in curr_glm_fit_data.items():
for stat_name, stat_dict in metric_dict.items():
col_idx = (metric_name, stat_name, 'value')
col_dict[col_idx].append(
pd.Series([stat_dict['statistic']], index=row_idx))
col_idx = (metric_name, stat_name, 'lower')
col_dict[col_idx].append(
pd.Series([stat_dict['minmax'][0]], index=row_idx))
col_idx = (metric_name, stat_name, 'upper')
col_dict[col_idx].append(
pd.Series([stat_dict['minmax'][1]], index=row_idx))
for key, val in col_dict.items():
col_dict[key] = functools.reduce(lambda x, y: x.append(y), val)
df_glm_fit = pd.DataFrame(col_dict)
df_glm_fit.columns.names = ['parameter', 'statistic', 'estimate']
def f():
return collections.defaultdict(f)
glm_fit_data_dict = collections.defaultdict(f)
for curr_ds, glm, parameter, statistic, estimate in itertools.product(
Datasets, GLMModels, ['AIC', 'nll', 'b0', 'b1'], ['mean', 'std'],
['value', 'lower', 'upper']):
try:
datum = df_glm_fit.loc[curr_ds.name, glm.name].loc[parameter, statistic,
estimate]
except KeyError:
continue
glm_fit_data_dict[curr_ds.name][
glm.name][parameter][statistic][estimate] = datum
glm_fit_data_dict = json.loads(json.dumps(glm_fit_data_dict))
return {
'data': glm_fit_data_dict,
'dataframe': df_glm_fit,
'metadata': {
'N': n
}
}
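# Illustrative access pattern for the object returned above (the dataset and
# model names below are placeholders, not guaranteed members of
# Datasets/GLMModels):
#
#   out = get_glm_fit_data(n=10)
#   df = out['dataframe']  # rows: (dataset_name, glm_name); columns: (parameter, statistic, estimate)
#   aic = df.loc['some_dataset', 'some_glm'].loc[('AIC', 'mean', 'value')]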
def recursive_beta_shift_fit(dataset,
arange,
brange,
n_s,
result_dict=None,
cf=.1,
tol=.00001,
shift=1e-16):
"""Compute MLE estimate of beta distribution with recursive brute-force."""
if max(arange[1] - arange[0], brange[1] - brange[0]) < tol:
return result_dict
result_dict, success = dataset.beta_shift_fit(
arange=arange, brange=brange, n_s=n_s, shift=shift)
if not success:
arange_l = max(arange[0] - ((arange[0] + arange[1]) / 2 - result_dict['a']),
0)
arange_r = arange[1] - ((arange[0] + arange[1]) / 2 - result_dict['a'])
brange_l = max(brange[0] - ((brange[0] + brange[1]) / 2 - result_dict['b']),
0)
brange_r = brange[1] - ((brange[0] + brange[1]) / 2 - result_dict['b'])
else:
arange_l = (1 - cf) * arange[0] + cf * result_dict['a']
arange_r = (1 - cf) * arange[1] + cf * result_dict['a']
brange_l = (1 - cf) * brange[0] + cf * result_dict['b']
brange_r = (1 - cf) * brange[1] + cf * result_dict['b']
return recursive_beta_shift_fit(
dataset, [arange_l, arange_r], [brange_l, brange_r],
n_s,
result_dict=result_dict,
cf=cf,
tol=tol,
shift=shift)
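# Illustrative call (mirrors the usage in get_beta_fit_data below; `dataset`
# stands in for one of the Datasets[...][Folds.test] objects):
#
#   fit = recursive_beta_shift_fit(dataset, arange=(0, 200), brange=(0, 50),
#                                  n_s=11, tol=1e-5, cf=.5, shift=1e-16)
#   fit['a'], fit['b'], fit['nll']  # MLE shape parameters and attained NLL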
def get_beta_fit_data():
"""Perform MLE of Beta distribution of best fit."""
data_dict_beta_fit = collections.defaultdict(list)
for dataset_name, dataset_dict in list(Datasets.items()):
dataset = dataset_dict[Folds.test]
beta_fit_best_param_dict = {'nll': float('inf')}
for shift in [1e-16]:
print((dataset_name, shift))
beta_fit_p1_dict = recursive_beta_shift_fit(
dataset,
arange=(0, 200),
brange=(0, 50),
n_s=11,
tol=1e-5,
cf=.5,
shift=shift)
if beta_fit_p1_dict['nll'] < beta_fit_best_param_dict['nll']:
beta_fit_best_param_dict = beta_fit_p1_dict
data_dict_beta_fit['dataset_name'].append(dataset_name)
for key, val in beta_fit_best_param_dict.items():
data_dict_beta_fit[key].append(val)
df_beta_fit = pd.DataFrame(data_dict_beta_fit).set_index(['dataset_name'])
print(df_beta_fit)
def f():
return collections.defaultdict(f)
beta_fit_data_dict = collections.defaultdict(f)
for curr_ds, parameter in itertools.product(Datasets,
['a', 'b', 'loc', 'scale', 'p1']):
datum = df_beta_fit.loc[curr_ds.name, parameter]
if isinstance(datum, np.int64):
datum = int(datum)
beta_fit_data_dict[curr_ds.name][parameter] = datum
beta_fit_data_dict = json.loads(json.dumps(beta_fit_data_dict))
return {'data': beta_fit_data_dict, 'dataframe': df_beta_fit}
if __name__ == '__main__':
# Write glm_fit analysis:
data = get_glm_fit_data(n=10)
pickle.dump(data, open('glm_fit_data.p', 'wb'))
# Write beta_fit analysis:
data = get_beta_fit_data()
pickle.dump(data, open('beta_fit_data.p', 'wb'))
# Write glm_fit summary plot:
for curr_Dataset, curr_GLMModel in itertools.product(Datasets, GLMModels):
gm = curr_GLMModel.value
ds = curr_Dataset.value[Folds.test]
save_file_path = os.path.join('glm_fit_figs', curr_Dataset.name)
if not os.path.exists(save_file_path):
os.mkdir(save_file_path)
save_file_name = os.path.join(save_file_path,
'{}.png'.format(curr_GLMModel.name))
fig = gm.plot_fit_sequence(ds, figsize_single=3, fontsize=10)
fig.savefig(save_file_name)
# Write calibration curve plot:
fig, ax = plt.subplots(figsize=(5.1, 3.1))
fontsize = 8
clrs = sns.color_palette('husl', n_colors=len(list(Datasets.items())))
LINE_STYLES = ['solid', 'dashed', 'dashdot', 'dotted']
NUM_STYLES = len(LINE_STYLES)
for ii, (ds_name, ds_dict) in enumerate(Datasets.items()):
ds = ds_dict[Folds.test]
gm_name_AIC_dict = {
gm_name: glm_fit_data['data'][ds_name][gm_name]['AIC']['mean']['value']
for gm_name, gm in GLMModels.items()
}
gm_best_name = min(gm_name_AIC_dict, key=gm_name_AIC_dict.get)
gm_best = {key: val for key, val in GLMModels.items()}[gm_best_name]
gm_best.plot_calibration(
ax,
ds,
plot_yx=ii == 0,
color=clrs[ii],
linestyle=LINE_STYLES[ii % NUM_STYLES],
fontsize=fontsize)
ax.set_title('E[Y | f(x)]')
ax.set_xlabel('Predicted confidence')
ax.set_ylabel('Empirical accuracy')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': fontsize})
fig.tight_layout(pad=.2, rect=[0, 0.03, 1, 0.95], w_pad=.5)
fig.savefig('calibration_curve.png')
# Write score density plots:
for ds_name, ds_dict in Datasets.items():
ds = ds_dict[Folds.test]
fig, ax = plt.subplots(1, 1, figsize=[3] * 2)
ds.plot_top_score_histogram(ax)
fig.tight_layout(pad=.2, rect=[0, 0.03, 1, 0.95], w_pad=.5)
save_file_path = os.path.join('density_figs', ds_name)
fig.savefig(save_file_path)
# Write ECE comparison plots:
dataset_list = Datasets
N_repeats, n_samples = 100, 1000
fontsize = 8
data = collections.defaultdict(list)
for curr_dataset in dataset_list:
print((curr_dataset.name, 'L1'))
ds = curr_dataset.value[Folds.val]
eece = ds.compute_error(ce_type='ew_ece_bin', norm=1)
guo_ece = Guo_et_al_data['ECE'][curr_dataset]
gm_name_AIC_dict = {
gm_name:
glm_fit_data['data'][curr_dataset.name][gm_name]['AIC']['mean']['value']
for gm_name, gm in GLMModels.items()
}
gm_best_name = min(gm_name_AIC_dict, key=gm_name_AIC_dict.get)
gm_best = {key: val for key, val in GLMModels.items()}[gm_best_name]
tece_best = gm_best.get_calibration_error_beta_dist(
ds, n_samples=n_samples, norm='L1')
config = {
'dataset': curr_dataset.name,
'split': Folds.test,
'calibration_method': 'no_calibration',
'ce_type': 'ew_ece_bin',
'num_bins': 15,
'bin_method': 'equal_width',
'norm': 1,
'num_samples': n_samples
}
beta_hat_poly, nll, AIC = ds.fit_glm(gm_best)
alpha = beta_fit_data['data'][ds.model]['a']
beta = beta_fit_data['data'][ds.model]['b']
p1 = beta_fit_data['data'][ds.model]['p1']
a = beta_hat_poly[0]
b = beta_hat_poly[1]
true_dataset = gm_best.get_true_dist(
n_samples=n_samples, alpha=alpha, beta=beta, a=a, b=b, p1=p1)
sece = np.mean([
calibrate(config, true_dataset=true_dataset) for _ in range(N_repeats)
])
data['dataset'].append(curr_dataset.name)
data['eece_L1'].append(eece)
data['guo_ece'].append(guo_ece)
data['tece_best_L1'].append(tece_best)
data['sece_L1'].append(sece / 100)
for curr_dataset in dataset_list:
print((curr_dataset.name, 'L2'))
ds = curr_dataset.value[Folds.val]
eece = ds.compute_error(ce_type='ew_ece_bin', norm=2)
guo_ece = Guo_et_al_data['ECE'][curr_dataset]
gm_name_AIC_dict = {
gm_name:
glm_fit_data['data'][curr_dataset.name][gm_name]['AIC']['mean']['value']
for gm_name, gm in GLMModels.items()
}
gm_best_name = min(gm_name_AIC_dict, key=gm_name_AIC_dict.get)
gm_best = {key: val for key, val in GLMModels.items()}[gm_best_name]
tece_best = gm_best.get_calibration_error_beta_dist(
ds, n_samples=n_samples, norm='L2')
config = {
'dataset': curr_dataset.name,
'split': Folds.test,
'calibration_method': 'no_calibration',
'ce_type': 'ew_ece_bin',
'num_bins': 15,
'bin_method': 'equal_width',
'norm': 2,
'num_samples': n_samples
}
beta_hat_poly, nll, AIC = ds.fit_glm(gm_best)
alpha = beta_fit_data['data'][ds.model]['a']
beta = beta_fit_data['data'][ds.model]['b']
p1 = beta_fit_data['data'][ds.model]['p1']
a = beta_hat_poly[0]
b = beta_hat_poly[1]
true_dataset = gm_best.get_true_dist(
n_samples=n_samples, alpha=alpha, beta=beta, a=a, b=b, p1=p1)
sece = np.mean([
calibrate(config, true_dataset=true_dataset) for _ in range(N_repeats)
])
data['eece_L2'].append(eece)
data['tece_best_L2'].append(tece_best)
data['sece_L2'].append(sece / 100)
df = pd.DataFrame(data)
clrs = sns.color_palette('husl', n_colors=len(df))
data_cols = [c for c in df.columns if c != 'dataset']
for xlabel, ylabel in itertools.combinations(data_cols, 2):
print((xlabel, ylabel))
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
xmax = max(np.max(df[xlabel]), np.max(df[ylabel]))
ax.plot([0, .3], [0, .3], 'r--')
for ii, (xi, yi) in enumerate(zip(df[xlabel], df[ylabel])):
ax.plot([xi], [yi], '*', color=clrs[ii], label=df.loc[ii]['dataset'])
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
ax.grid(which='both', color='lightgray', linestyle='-')
ax.legend(
loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': fontsize})
ax.set_xlim([0, .3])
ax.set_ylim([0, .3])
fig.tight_layout(pad=.2, rect=[0, 0.03, 1, 0.95], w_pad=.5)
save_file_name = os.path.join(ece_comparison_dir,
'{}_{}.png'.format(xlabel, ylabel))
fig.savefig(save_file_name)
| apache-2.0 |
nwhidden/ND101-Deep-Learning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
# Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
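# A minimal sketch of the two callables this helper expects (assumptions:
# CIFAR-10 pixel values in [0, 255] and 10 classes; the calling notebook
# supplies its own implementations):
#
#   def normalize(x):
#       return x / 255.0
#
#   def one_hot_encode(labels):
#       encoded = np.zeros((len(labels), 10))
#       encoded[np.arange(len(labels)), labels] = 1
#       return encoded
#
#   preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)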
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
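# Example (illustrative): 10 samples with batch_size=4 yield chunks of 4, 4 and 2.
#
#   feats, labs = np.arange(10), np.arange(10)
#   [len(f) for f, l in batch_features_labels(feats, labs, 4)]  # -> [4, 4, 2]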
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
ContinuumIO/blaze | blaze/compute/pandas.py | 3 | 23992 | """
>>> from blaze.expr import symbol
>>> from blaze.compute.pandas import compute
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> deadbeats = accounts[accounts['amount'] < 0]['name']
>>> from pandas import DataFrame
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> df = DataFrame(data, columns=['name', 'amount'])
>>> compute(deadbeats, df)
1 Bob
2 Charlie
Name: name, dtype: object
"""
from __future__ import absolute_import, division, print_function
from datetime import timedelta
import fnmatch
import itertools
from distutils.version import LooseVersion
import warnings
from collections import defaultdict, Iterable
import numpy as np
import pandas as pd
from pandas.core.generic import NDFrame
from pandas import DataFrame, Series
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
from toolz import merge as merge_dicts
from toolz import groupby
from toolz.curried import pipe, filter, map, concat
import datashape
from datashape import to_numpy_dtype
from datashape.predicates import isscalar
from odo import into
try:
import dask.dataframe as dd
DaskDataFrame = dd.DataFrame
DaskSeries = dd.Series
except ImportError:
dd = None
DaskDataFrame = pd.DataFrame
DaskSeries = pd.Series
from .core import compute, compute_up, base
from .varargs import VarArgs
from ..compatibility import _inttypes
from ..dispatch import dispatch
from ..expr import (
Apply,
BinOp,
Broadcast,
By,
Ceil,
Coalesce,
Coerce,
Concat,
DateTime,
DateTimeTruncate,
Distinct,
ElemWise,
Expr,
Field,
Floor,
Head,
Interp,
IsIn,
Join,
Label,
Like,
Map,
Merge,
Millisecond,
Pad,
Projection,
ReLabel,
Reduction,
Replace,
Round,
Sample,
seconds,
Selection,
Shift,
Slice,
Sort,
strftime,
Summary,
Tail,
UTCFromTimestamp,
UnaryOp,
UnaryStringFunction,
common_subexpression,
count,
isnan,
nelements,
notnull,
nunique,
std,
summary,
symbol,
var,
StrCat,
StrFind,
StrSlice,
SliceReplace,
total_seconds,
)
__all__ = []
@dispatch(Projection, (DataFrame, DaskDataFrame))
def compute_up(t, df, **kwargs):
return df[list(t.fields)]
@dispatch(Field, (DataFrame, DataFrameGroupBy, DaskDataFrame))
def compute_up(t, df, **kwargs):
assert len(t.fields) == 1
return df[t.fields[0]]
@dispatch(Field, (Series, DaskSeries))
def compute_up(t, data, **kwargs):
assert len(t.fields) == 1
if t.fields[0] == data.name:
return data
else:
raise ValueError("Fieldname %r does not match Series name %r"
% (t.fields[0], data.name))
@dispatch(Broadcast, (DataFrame, DaskDataFrame))
def compute_up(t, df, **kwargs):
return compute(t._full_expr, df, return_type='native')
@dispatch(Broadcast, Series)
def compute_up(t, s, **kwargs):
return compute_up(t, s.to_frame(), **kwargs)
@dispatch(Interp, Series)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return data % t.rhs
else:
return t.lhs % data
@compute_up.register(Interp, Series, (Series, base))
@compute_up.register(Interp, base, Series)
def compute_up_pd_interp(t, lhs, rhs, **kwargs):
return lhs % rhs
@dispatch(BinOp, (Series, DaskSeries))
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return t.op(data, t.rhs)
else:
return t.op(t.lhs, data)
@compute_up.register(BinOp, (Series, DaskSeries), (Series, base, DaskSeries))
@compute_up.register(BinOp, (Series, base, DaskSeries), (Series, DaskSeries))
def compute_up_binop(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(UnaryOp, NDFrame)
def compute_up(t, df, **kwargs):
f = getattr(t, 'op', getattr(np, t.symbol, None))
if f is None:
raise ValueError('%s is not a valid operation on %s objects' %
(t.symbol, type(df).__name__))
return f(df)
@dispatch(Selection, (Series, DataFrame, DaskSeries, DaskDataFrame))
def compute_up(expr, df, **kwargs):
return compute_up(
expr,
df,
compute(expr.predicate, {expr._child: df}, return_type='native'),
**kwargs
)
@dispatch(Selection, (Series, DataFrame, DaskSeries, DaskDataFrame),
(Series, DaskSeries))
def compute_up(expr, df, predicate, **kwargs):
return df[predicate]
@dispatch(Join, DataFrame, DataFrame)
def compute_up(t, lhs, rhs, **kwargs):
""" Join two pandas data frames on arbitrary columns
The approach taken here could probably be improved.
To join on two columns we force each column to be the index of the
dataframe, perform the join, and then reset the index back to the left
side's original index.
"""
result = pd.merge(
lhs,
rhs,
left_on=t.on_left,
right_on=t.on_right,
how=t.how,
suffixes=t.suffixes,
)
return result.reset_index()[t.fields]
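# Illustrative equivalence (made-up frames, not from the test suite): a blaze
# join on a shared 'name' column reduces to the pandas merge above followed by
# re-selecting the expression's fields:
#
#   left = DataFrame({'name': ['Alice', 'Bob'], 'amount': [100, 200]})
#   right = DataFrame({'name': ['Alice', 'Charlie'], 'id': [1, 3]})
#   pd.merge(left, right, left_on='name', right_on='name', how='inner')
#   # -> a single row: Alice, 100, 1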
@dispatch(isnan, pd.Series)
def compute_up(expr, data, **kwargs):
return data.isnull()
@dispatch(notnull, pd.Series)
def compute_up(expr, data, **kwargs):
return data.notnull()
pandas_structure = DataFrame, DaskDataFrame, Series, DataFrameGroupBy, SeriesGroupBy
@dispatch(Concat, pandas_structure, pandas_structure)
def compute_up(t, lhs, rhs, _concat=pd.concat, **kwargs):
if not (isinstance(lhs, type(rhs)) or isinstance(rhs, type(lhs))):
raise TypeError('lhs and rhs must be the same type')
return _concat((lhs, rhs), axis=t.axis, ignore_index=True)
def get_scalar(result):
# pandas may return an int, numpy scalar or non scalar here so we need to
# program defensively so that things are JSON serializable
try:
return result.item()
except (AttributeError, ValueError):
if isinstance(result, dd.core.Scalar):
return result.compute()
return result
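# Illustrative behaviour (assumed inputs): numpy scalars are unwrapped via
# .item() to plain Python values; inputs without .item() hit the except branch
# and are returned as-is (after the dask-Scalar check).
#
#   get_scalar(np.float64(3.5))      # -> 3.5
#   get_scalar(np.array([2]).sum())  # -> 2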
@dispatch(Reduction, (Series, SeriesGroupBy, DaskSeries))
def compute_up(t, s, **kwargs):
result = get_scalar(getattr(s, t.symbol)())
if t.keepdims:
result = Series([result], name=s.name)
return result
@dispatch((std, var), (Series, SeriesGroupBy, DaskSeries))
def compute_up(t, s, **kwargs):
measure = t.schema.measure
is_timedelta = isinstance(
getattr(measure, 'ty', measure),
datashape.TimeDelta,
)
if is_timedelta:
# part 1 of 2 to work around the fact that pandas does not have
# timedelta var or std: cast to a double
s = s.astype('timedelta64[s]').astype('int64')
result = get_scalar(getattr(s, t.symbol)(ddof=t.unbiased))
if t.keepdims:
result = Series([result], name=s.name)
if is_timedelta:
# part 2 of 2 to work around the fact that pandas does not have
# timedelta var or std: cast back from seconds by creating a timedelta
result = timedelta(seconds=result)
return result
@dispatch(Distinct, DataFrame)
def compute_up(t, df, **kwargs):
return df.drop_duplicates(subset=t.on or None).reset_index(drop=True)
@dispatch(Distinct, Series)
def compute_up(t, s, **kwargs):
if t.on:
raise ValueError('malformed expression: no columns to distinct on')
return s.drop_duplicates().reset_index(drop=True)
@dispatch(nunique, DataFrame)
def compute_up(expr, data, **kwargs):
return compute_up(expr._child.distinct().count(), data, **kwargs)
@dispatch(UnaryStringFunction, Series)
def compute_up(expr, data, **kwargs):
name = type(expr).__name__
return getattr(data.str, name)()
@dispatch(Replace, Series)
def compute_up(expr, data, **kwargs):
max_count = -1 if expr.max is None else expr.max
return data.str.replace(expr.old, expr.new, max_count)
@dispatch(Pad, Series)
def compute_up(expr, data, **kwargs):
side = 'left' if expr.side is None else expr.side
fillchar = ' ' if expr.fillchar is None else expr.fillchar
return data.str.pad(expr.width, side, fillchar)
@dispatch(StrCat, Series, Series)
def compute_up(expr, lhs_data, rhs_data, **kwargs):
res = lhs_data.str.cat(rhs_data, sep=expr.sep)
return res
@dispatch(StrFind, Series)
def compute_up(expr, data, **kwargs):
return data.str.find(expr.sub)
@dispatch(StrSlice, Series)
def compute_up(expr, data, **kwargs):
if isinstance(expr.slice, tuple):
return data.str[slice(*expr.slice)]
return data.str[expr.slice]
@dispatch(SliceReplace, Series)
def compute_up(expr, data, **kwargs):
return data.str.slice_replace(expr.start, expr.stop, expr.repl)
def unpack(seq):
""" Unpack sequence of length one
>>> unpack([1, 2, 3])
[1, 2, 3]
>>> unpack([1])
1
"""
seq = list(seq)
if len(seq) == 1:
seq = seq[0]
return seq
Grouper = ElemWise, Series, list
@dispatch(By, list, DataFrame)
def get_grouper(c, grouper, df):
return grouper
@dispatch(By, Expr, NDFrame)
def get_grouper(c, grouper, df):
g = compute(grouper, {c._child: df}, return_type='native')
if isinstance(g, Series):
return g
if isinstance(g, DataFrame):
return [g[col] for col in g.columns]
@dispatch(By, (Field, Projection), NDFrame)
def get_grouper(c, grouper, df):
return grouper.fields
@dispatch(By, Reduction, Grouper, NDFrame)
def compute_by(t, r, g, df):
names = [r._name]
preapply = compute(r._child, {t._child: df}, return_type='native')
# Pandas and Blaze column naming schemes differ
# Coerce DataFrame column names to match Blaze's names
preapply = preapply.copy()
if isinstance(preapply, Series):
preapply.name = names[0]
else:
preapply.names = names
group_df = concat_nodup(df, preapply)
gb = group_df.groupby(g)
groups = gb[names[0] if isscalar(t.apply._child.dshape.measure) else names]
return compute_up(r, groups, return_type='native') # do reduction
name_dict = dict()
seen_names = set()
def _name(expr):
""" A unique and deterministic name for an expression """
if expr in name_dict:
return name_dict[expr]
result = base = expr._name or '_'
if result in seen_names:
for i in itertools.count(1):
result = '%s_%d' % (base, i)
if result not in seen_names:
break
# result is an unseen name
seen_names.add(result)
name_dict[expr] = result
return result
def fancify_summary(expr):
""" Separate a complex summary into two pieces
Helps pandas compute_by on summaries
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> one, two, three = fancify_summary(summary(a=t.x.sum(), b=t.x.sum() + t.y.count() - 1))
A simpler summary with only raw reductions
>>> one
summary(x_sum=sum(t.x), y_count=count(t.y))
A mapping of those names to new leaves to use in another computation
>>> two # doctest: +SKIP
{'x_sum': x_sum, 'y_count': y_count}
A mapping of computations to do for each column
>>> three # doctest: +SKIP
{'a': x_sum, 'b': (x_sum + y_count) - 1}
In this way, ``compute_by`` is able to do simple pandas reductions using
groups.agg(...) and then do columnwise arithmetic afterwards.
"""
seen_names.clear()
name_dict.clear()
exprs = pipe(expr.values,
map(Expr._traverse),
concat,
filter(lambda x: isinstance(x, Reduction)),
set)
one = summary(**dict((_name(expr), expr) for expr in exprs))
two = dict((_name(expr), symbol(_name(expr), datashape.var * expr.dshape))
for expr in exprs)
d = dict((expr, two[_name(expr)]) for expr in exprs)
three = dict((name, value._subs(d)) for name, value in zip(expr.names,
expr.values))
return one, two, three
@dispatch(By, Summary, Grouper, NDFrame)
def compute_by(t, s, g, df):
one, two, three = fancify_summary(s) # see above
names_columns = list(zip(one.fields, one.values))
func = lambda x: not isinstance(x[1], count)
is_field = defaultdict(lambda: iter([]), groupby(func, names_columns))
preapply = DataFrame(dict(
zip([name for name, _ in is_field[True]],
[compute(col._child, {t._child: df}, return_type='native')
for _, col in is_field[True]])
)
)
if list(is_field[False]):
emptys = DataFrame([0] * len(df.index),
index=df.index,
columns=[name for name, _ in is_field[False]])
preapply = concat_nodup(preapply, emptys)
if not df.index.equals(preapply.index):
df = df.loc[preapply.index]
df2 = concat_nodup(df, preapply)
groups = df2.groupby(g)
d = dict((name, v.symbol) for name, v in zip(one.names, one.values))
result = groups.agg(d)
scope = dict((v, result[k]) for k, v in two.items())
cols = [
compute(expr.label(name), scope, return_type='native')
for name, expr in three.items()
]
result2 = pd.concat(cols, axis=1)
# Rearrange columns to match names order
result3 = result2[
sorted(result2.columns, key=lambda t, s=s: s.fields.index(t))
]
return result3
@dispatch(Expr, (DataFrame, DaskDataFrame))
def post_compute_by(t, df):
return df.reset_index(drop=True)
@dispatch((Summary, Reduction), (DataFrame, DaskDataFrame))
def post_compute_by(t, df):
return df.reset_index()
@dispatch(By, NDFrame)
def compute_up(t, df, **kwargs):
grouper = get_grouper(t, t.grouper, df)
result = compute_by(t, t.apply, grouper, df)
result2 = post_compute_by(t.apply, into(DataFrame, result))
if isinstance(result2, DataFrame):
result2.columns = t.fields
return result2
def concat_nodup(a, b):
""" Concatenate two dataframes/series without duplicately named columns
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Charlie', 300]],
... columns=['id','name', 'amount'])
>>> concat_nodup(df, df)
id name amount
0 1 Alice 100
1 2 Bob -200
2 3 Charlie 300
>>> concat_nodup(df.name, df.amount)
name amount
0 Alice 100
1 Bob -200
2 Charlie 300
>>> concat_nodup(df, df.amount + df.id)
id name amount 0
0 1 Alice 100 101
1 2 Bob -200 -198
2 3 Charlie 300 303
"""
if isinstance(a, DataFrame) and isinstance(b, DataFrame):
return pd.concat([a, b[[c for c in b.columns if c not in a.columns]]],
axis=1)
if isinstance(a, DataFrame) and isinstance(b, Series):
if b.name not in a.columns:
return pd.concat([a, b], axis=1)
else:
return a
if isinstance(a, Series) and isinstance(b, DataFrame):
return pd.concat([a, b[[c for c in b.columns if c != a.name]]], axis=1)
if isinstance(a, Series) and isinstance(b, Series):
if a.name == b.name:
return a
else:
return pd.concat([a, b], axis=1)
pdsort = getattr(
pd.DataFrame,
'sort' if LooseVersion(pd.__version__) < '0.17.0' else 'sort_values'
)
@dispatch(Sort, DataFrame)
def compute_up(t, df, **kwargs):
return pdsort(df, t.key, ascending=t.ascending)
@dispatch(Sort, Series)
def compute_up(t, s, **kwargs):
try:
return s.sort_values(ascending=t.ascending)
except AttributeError:
return s.order(ascending=t.ascending)
@dispatch(Sample, (Series, DataFrame))
def compute_up(t, df, **kwargs):
from math import modf
if t.frac is not None:
# Work around annoying edge case: Python's round() builtin (which
# Pandas' sample() uses) rounds 0.5, 2.5, 4.5, ... down to 0, 2, 4, ...,
# while it rounds 1.5, 3.5, 5.5, ... up. This is inconsistent with any
# sane implementation of floating point rounding, including SQL's, so
# we work around it here.
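# Worked example of that edge case (Python 3 rounds half to even):
# round(0.5) == 0 and round(2.5) == 2, but round(1.5) == 2. The modf-based
# branch below instead always bumps a fractional part of exactly .5 up.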
fractional, integral = modf(t.frac * df.shape[0])
n = int(integral + (0 if fractional < 0.5 else 1))
else:
n = min(t.n, df.shape[0])
return df.sample(n=n)
@dispatch(Sample, (DaskDataFrame, DaskSeries))
def compute_up(t, df, **kwargs):
# Dask doesn't support sample(n=...), only sample(frac=...), so we have a
# separate dispatch for dask objects.
if t.frac is not None:
frac = t.frac
else:
nrows = len(df)
frac = float(min(t.n, nrows)) / nrows
return df.sample(frac=frac)
@dispatch(Head, (Series, DataFrame, DaskDataFrame, DaskSeries))
def compute_up(t, df, **kwargs):
return df.head(t.n)
@dispatch(Tail, (Series, DataFrame, DaskDataFrame, DaskSeries))
def compute_up(t, df, **kwargs):
return df.tail(t.n)
@dispatch(Label, DataFrame)
def compute_up(t, df, **kwargs):
return type(df)(df, columns=[t.label])
@dispatch(Label, Series)
def compute_up(t, df, **kwargs):
return Series(df, name=t.label)
@dispatch(ReLabel, (DataFrame, DaskDataFrame))
def compute_up(t, df, **kwargs):
return df.rename(columns=dict(t.labels))
@dispatch(ReLabel, (Series, DaskSeries))
def compute_up(t, s, **kwargs):
labels = t.labels
if len(labels) > 1:
raise ValueError('You can only relabel a Series with a single name')
pair, = labels
_, replacement = pair
return Series(s, name=replacement)
@dispatch(Map, DataFrame)
def compute_up(t, df, **kwargs):
return df.apply(lambda tup: t.func(*tup), axis=1)
@dispatch(Map, Series)
def compute_up(t, df, **kwargs):
result = df.map(t.func)
try:
result.name = t._name
except NotImplementedError:
# We don't have a schema, but we should still be able to map
result.name = df.name
return result
@dispatch(Apply, (Series, DataFrame))
def compute_up(t, df, **kwargs):
return t.func(df)
def _merge(module):
"""Helper to dispatch merge on both pandas and dask structures while
respecting the return type of each.
If dask structures are given, a dask structure is returned; if a pandas
structure is given, a pandas structure is returned.
Parameters
----------
module : module
Either ``pandas`` or ``dask``.
"""
if module is None:
return
@dispatch(Merge, VarArgs[module.Series, base])
def compute_up(expr, args, **kwargs):
"""Optimized handler when we know the sequence doesn't contain tabular
elements.
"""
fields = expr.fields
return module.DataFrame(
dict(zip(fields, args)),
# pass the columns explicitly so that the order is correct
columns=fields,
)
@dispatch(Merge, VarArgs[module.Series, module.DataFrame, base])
def compute_up(expr, args, **kwargs):
for arg in args:
try:
ix = arg.index
except AttributeError:
pass
else:
break
return module.concat(
[
pd.Series(arg, index=ix, name=e._name)
if isinstance(arg, base) else
arg
for e, arg in zip(expr.args, args)
],
axis=1,
)
_merge(pd)
_merge(dd)
del _merge
@dispatch(Summary, (DataFrame, DaskDataFrame))
def compute_up(expr, data, **kwargs):
values = [
compute(val, {expr._child: data}, return_type='native')
for val in expr.values
]
if expr.keepdims:
return type(data)([values], columns=expr.fields)
else:
return Series(dict(zip(expr.fields, values)))
@dispatch(Summary, (Series, DaskSeries))
def compute_up(expr, data, **kwargs):
result = tuple(
compute(val, {expr._child: data}, return_type='native')
for val in expr.values
)
if expr.keepdims:
result = [result]
return result
@dispatch(Like, Series)
def compute_up(expr, data, **kwargs):
return data.str.contains(r'^%s$' % fnmatch.translate(expr.pattern))
def get_date_attr(s, attr, name):
try:
result = getattr(s.dt, attr) # new in pandas 0.15
except AttributeError:
result = getattr(pd.DatetimeIndex(s), attr)
result.name = name
return result
@dispatch(DateTime, Series)
def compute_up(expr, s, **kwargs):
return get_date_attr(s, expr.attr, expr._name)
@dispatch(total_seconds, Series)
def compute_up(expr, s, **kwargs):
result = s.dt.total_seconds()
result.name = expr._name
return result
@dispatch(UTCFromTimestamp, Series)
def compute_up(expr, s, **kwargs):
return pd.datetools.to_datetime(s * 1e9, utc=True)
@dispatch(Millisecond, Series)
def compute_up(expr, s, **kwargs):
return get_date_attr(s, 'microsecond',
'%s_millisecond' % expr._child._name) // 1000
@dispatch(Round, Series)
def compute_up(expr, data, **kwargs):
return data.dt.round(expr.freq)
@dispatch(Ceil, Series)
def compute_up(expr, data, **kwargs):
return data.dt.ceil(expr.freq)
@dispatch(Floor, Series)
def compute_up(expr, data, **kwargs):
return data.dt.floor(expr.freq)
@dispatch(strftime, Series)
def compute_up(expr, data, **kwargs):
return data.dt.strftime(expr.format)
@dispatch(Slice, (DataFrame, Series))
def compute_up(expr, df, **kwargs):
index = expr.index
if isinstance(index, tuple) and len(index) == 1:
index = index[0]
if isinstance(index, _inttypes + (list,)):
return df.iloc[index]
elif isinstance(index, slice):
if index.stop is not None:
return df.iloc[index.start:index.stop:index.step]
else:
return df.iloc[index]
else:
raise NotImplementedError()
@dispatch(count, DataFrame)
def compute_up(expr, df, **kwargs):
result = df.shape[0]
if expr.keepdims:
result = Series([result], name=expr._name)
return result
@dispatch(nelements, (DataFrame, Series))
def compute_up(expr, df, **kwargs):
return df.shape[0]
@dispatch((count, nelements), (DaskDataFrame, DaskSeries))
def compute_up(expr, df, **kwargs):
warnings.warn("Counting the elements of a dask object can be slow.")
result = len(df)
if expr.keepdims:
result = DaskSeries([result], name=expr._name)
return result
@dispatch(DateTimeTruncate, Series)
def compute_up(expr, data, **kwargs):
return Series(compute_up(expr, into(np.ndarray, data), **kwargs),
name=expr._name)
@dispatch(IsIn, (Series, DaskSeries), Iterable)
def compute_up(expr, data, keys, **kwargs):
return data.isin(keys)
@dispatch(Coerce, (Series, DaskSeries))
def compute_up(expr, data, **kwargs):
measure = expr.to.measure
if measure in {datashape.string, datashape.Option(datashape.string)}:
return data.astype(str)
elif measure in {datashape.datetime_,
datashape.Option(datashape.datetime_)}:
return data.astype(np.datetime64)
return data.astype(to_numpy_dtype(expr.schema))
@dispatch(Shift, Series)
def compute_up(expr, data, **kwargs):
return data.shift(expr.n)
def array_coalesce(expr, lhs, rhs, wrap=None, **kwargs):
res = np.where(pd.isnull(lhs), rhs, lhs)
if not expr.dshape.shape:
res = res.item()
elif wrap:
res = wrap(res)
return res
@compute_up.register(
Coalesce, (Series, DaskSeries), (np.ndarray, Series, base, DaskSeries)
)
@compute_up.register(
Coalesce,
(Series, base, DaskSeries), (np.ndarray, Series, DaskSeries)
)
def compute_up_coalesce(expr, lhs, rhs, **kwargs):
return array_coalesce(expr, lhs, rhs, type(lhs))
@dispatch(Coalesce, (Series, DaskSeries, base))
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
lhs = data
rhs = t.rhs
else:
lhs = t.lhs
rhs = data
return compute_up_coalesce(t, lhs, rhs)
| bsd-3-clause |
yarikoptic/pystatsmodels | statsmodels/iolib/tests/test_foreign.py | 3 | 7414 | """
Tests for iolib/foreign.py
"""
import os
import warnings
from datetime import datetime
from numpy.testing import *
import numpy as np
from pandas import DataFrame, isnull
import pandas.util.testing as ptesting
from statsmodels.compatnp.py3k import BytesIO, asbytes
import statsmodels.api as sm
from statsmodels.iolib.foreign import (StataWriter, genfromdta,
_datetime_to_stata_elapsed, _stata_elapsed_date_to_datetime)
from statsmodels.datasets import macrodata
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# Test precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
curdir = os.path.dirname(os.path.abspath(__file__))
def test_genfromdta():
#Test genfromdta vs. results/macrodata.npy created with genfromtxt.
#NOTE: Stata handles data very oddly. Round tripping from csv to dta
# to ndarray 2710.349 (csv) -> 2510.2491 (stata) -> 2710.34912109375
# (dta/ndarray)
#res2 = np.load(curdir+'/results/macrodata.npy')
#res2 = res2.view((float,len(res2[0])))
from results.macrodata import macrodata_result as res2
res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta')
#res1 = res1.view((float,len(res1[0])))
assert_array_equal(res1 == res2, True)
def test_genfromdta_pandas():
from pandas.util.testing import assert_frame_equal
dta = macrodata.load_pandas().data
curdir = os.path.dirname(os.path.abspath(__file__))
res1 = sm.iolib.genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta',
pandas=True)
res1 = res1.astype(float)
assert_frame_equal(res1, dta)
def test_stata_writer_structured():
buf = BytesIO()
dta = macrodata.load().data
dtype = dta.dtype
dta = dta.astype(np.dtype([('year', int),
('quarter', int)] + dtype.descr[2:]))
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
assert_array_equal(dta, dta2)
def test_stata_writer_array():
buf = BytesIO()
dta = macrodata.load().data
dta = DataFrame.from_records(dta)
dta.columns = ["v%d" % i for i in range(1,15)]
writer = StataWriter(buf, dta.values)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
dta = dta.to_records(index=False)
assert_array_equal(dta, dta2)
def test_missing_roundtrip():
buf = BytesIO()
dta = np.array([(np.nan, np.inf, "")],
dtype=[("double_miss", float), ("float_miss", np.float32),
("string_miss", "a1")])
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta = genfromdta(buf, missing_flt=np.nan)
assert_(isnull(dta[0][0]))
assert_(isnull(dta[0][1]))
assert_(dta[0][2] == asbytes(""))
dta = genfromdta(os.path.join(curdir, "results/data_missing.dta"),
missing_flt=-999)
assert_(np.all([dta[0][i] == -999 for i in range(5)]))
def test_stata_writer_pandas():
buf = BytesIO()
dta = macrodata.load().data
dtype = dta.dtype
#as of 0.9.0 pandas only supports i8 and f8
dta = dta.astype(np.dtype([('year', 'i8'),
('quarter', 'i8')] + dtype.descr[2:]))
dta4 = dta.astype(np.dtype([('year', 'i4'),
('quarter', 'i4')] + dtype.descr[2:]))
dta = DataFrame.from_records(dta)
dta4 = DataFrame.from_records(dta4)
# dta is int64 'i8' given to Stata writer
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
dta5 = DataFrame.from_records(dta2)
# dta2 is int32 'i4' returned from Stata reader
if dta5.dtypes[1] is np.dtype('int64'):
ptesting.assert_frame_equal(dta.reset_index(), dta5)
else:
# don't check index because it has different size, int32 versus int64
ptesting.assert_frame_equal(dta4, dta5[dta5.columns[1:]])
def test_stata_writer_unicode():
# make sure to test with characters outside the latin-1 encoding
pass
@dec.skipif(pandas_old)
def test_genfromdta_datetime():
results = [(datetime(2006, 11, 19, 23, 13, 20), 1479596223000,
datetime(2010, 1, 20), datetime(2010, 1, 8), datetime(2010, 1, 1),
datetime(1974, 7, 1), datetime(2010, 1, 1), datetime(2010, 1, 1)),
(datetime(1959, 12, 31, 20, 3, 20), -1479590, datetime(1953, 10, 2),
datetime(1948, 6, 10), datetime(1955, 1, 1), datetime(1955, 7, 1),
datetime(1955, 1, 1), datetime(2, 1, 1))]
with warnings.catch_warnings(record=True) as w:
dta = genfromdta(os.path.join(curdir, "results/time_series_examples.dta"))
assert_(len(w) == 1) # should get a warning for that format.
assert_array_equal(dta[0].tolist(), results[0])
assert_array_equal(dta[1].tolist(), results[1])
with warnings.catch_warnings(record=True):
dta = genfromdta(os.path.join(curdir, "results/time_series_examples.dta"),
pandas=True)
assert_array_equal(dta.irow(0).tolist(), results[0])
assert_array_equal(dta.irow(1).tolist(), results[1])
def test_date_converters():
ms = [-1479597200000, -1e6, -1e5, -100, 1e5, 1e6, 1479597200000]
days = [-1e5, -1200, -800, -365, -50, 0, 50, 365, 800, 1200, 1e5]
weeks = [-1e4, -1e2, -53, -52, -51, 0, 51, 52, 53, 1e2, 1e4]
months = [-1e4, -1e3, -100, -13, -12, -11, 0, 11, 12, 13, 100, 1e3, 1e4]
quarter = [-100, -50, -5, -4, -3, 0, 3, 4, 5, 50, 100]
half = [-50, 40, 30, 10, 3, 2, 1, 0, 1, 2, 3, 10, 30, 40, 50]
year = [1, 50, 500, 1000, 1500, 1975, 2075]
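# Each list holds representative elapsed values for one Stata date format; round-tripping
# through datetime and back should be the identity for every format code.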
for i in ms:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tc"), "tc"), i)
for i in days:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "td"), "td"), i)
for i in weeks:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tw"), "tw"), i)
for i in months:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tm"), "tm"), i)
for i in quarter:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tq"), "tq"), i)
for i in half:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "th"), "th"), i)
for i in year:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "ty"), "ty"), i)
@dec.skipif(pandas_old)
def test_datetime_roundtrip():
dta = np.array([(1, datetime(2010, 1, 1), 2),
(2, datetime(2010, 2, 1), 3),
(4, datetime(2010, 3, 1), 5)],
dtype=[('var1', float), ('var2', object), ('var3', float)])
buf = BytesIO()
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
assert_equal(dta, dta2)
dta = DataFrame.from_records(dta)
buf = BytesIO()
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf, pandas=True)
ptesting.assert_frame_equal(dta, dta2.drop('index', axis=1))
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
exit=False)
| bsd-3-clause |
DCPROGS/DCQTGUI | dcqtgui/QTtrace.py | 1 | 21209 | #! /usr/bin/python
"""
A simple GUI to display and process single-channel records.
Depends on pyqt and matplotlib modules.
"""
import math
try:
from PySide.QtCore import *
from PySide.QtGui import *
except:
raise ImportError("pyqt module is missing")
import numpy as np
import dcio
import filter
class TraceGUI(QMainWindow):
def __init__(self, parent=None):
super(TraceGUI, self).__init__(parent)
self.resize(1000, 700) # width, height in px
self.mainFrame = QWidget()
self.setWindowTitle('pyPlotsamp')
self.setBackgroundRole(QPalette.Base)
self.setAutoFillBackground(True)
self.painter = QPainter()
self.loaded = False
self.filtered = False
self.line_length = 5.0 # seconds
self.page_lines = 5
self.point_every = 50
self.line_separ = 10.0 # pA
self.pages = 1
self.page = 1
self.intervals = None
self.amplitudes = None
self.fc = 1000.0
fileMenu = self.menuBar().addMenu('&File')
fileSSDOpenAction = self.createAction("&Open SSD file", self.onSSDFileOpen,
None, "ssdfileopen", "File Open")
fileABFOpenAction = self.createAction("&Open ABF file", self.onABFFileOpen,
None, "abffileopen", "File Open")
fileIdealizedClampfitOpenAction = self.createAction(
"&Open Clampfit idealised file", self.onClampfitIdealisedOpen,
None, "clampfitfileopen", "File Open")
fileSaveAsAction = self.createAction("&Save As...", self.onFileSaveAs,
None, "filesaveas", "File Save As")
self.addActions(fileMenu, (fileSSDOpenAction, fileABFOpenAction,
fileIdealizedClampfitOpenAction, fileSaveAsAction))
plotMenu = self.menuBar().addMenu('&Plot')
nextPageAction = self.createAction("&Next page", self.onNextPage)
prevPageAction = self.createAction("&Previous page", self.onPrevPage)
printPageAction = self.createAction("&Print page", self.onPrint)
plotOptionsAction = self.createAction("&Plot options", self.onPlotOptions)
self.addActions(plotMenu, (nextPageAction,
prevPageAction, #printPageAction,
plotOptionsAction))
signalMenu = self.menuBar().addMenu('&Signal')
filterGausAction = self.createAction("&Gaussian filter", self.onFilterGaus)
sliceTraceAction = self.createAction("&Slice trace", self.onSliceTrace)
self.addActions(signalMenu, (filterGausAction, sliceTraceAction))
helpMenu = self.menuBar().addMenu('&Help')
helpAboutAction = self.createAction("&About", self.onHelpAbout)
self.addActions(helpMenu, (helpAboutAction, None))
def createAction(self, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, signal="triggered()"):
"""
Create menu actions.
"""
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def addActions(self, target, actions):
"""
Add actions to menu.
"""
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def onSSDFileOpen(self):
"""
"""
self.filename, filt = QFileDialog.getOpenFileName(self,
"Open Data File...", "", "Consam files (*.ssd *.SSD *.dat *.DAT)")
self.h = dcio.ssd_read_header (self.filename)
self.trace = dcio.ssd_read_data(self.filename, self.h)
self.calfac = self.h['calfac']
self.srate = self.h['srate']
self.sample = 1 / self.h['srate']
self.points_total = self.h['ilen'] / 2
self.ffilter = self.h['filt']
self.file_type = 'ssd'
self.loaded = True
self.page = 1
self.update()
def onABFFileOpen(self):
"""
"""
self.filename, filt = QFileDialog.getOpenFileName(self,
"Open Data File...", "", "Axon files (*.abf)")
self.h = dcio.abf_read_header(self.filename)
self.trace = dcio.abf_read_data(self.filename, self.h)
self.points_total = self.h['IActualAcqLength'] / self.h['nADCNumChannels']
self.srate = 1 / (self.h['fADCSampleInterval'] * self.h['nADCNumChannels'])
self.sample = self.h['fADCSampleInterval'] * self.h['nADCNumChannels'] / 1e6
self.calfac = (1 /
#(6553.6
((self.h['IADCResolution'] / self.h['fADCRange']) * self.h['fTelegraphAdditGain'][self.h['nADCSamplingSeq'][0]] *
self.h['fInstrumentScaleFactor'][self.h['nADCSamplingSeq'][0]]))
self.ffilter = float(self.h['fSignalLowpassFilter'][self.h['nADCSamplingSeq'][0]])
self.file_type = 'abf'
self.loaded = True
self.page = 1
self.update()
def onClampfitIdealisedOpen(self):
self.filename, filt = QFileDialog.getOpenFileName(self,
"Open Data File...", "", "CSV files (*.csv)")
self.record = np.genfromtxt(self.filename, skip_header=1, delimiter=',')
self.intervals = self.record[:, 8] / 1000.0 # convert from ms to s
self.amplitudes = self.record[:, 6]
self.record_length = int(self.record[-1, 5] / 1000.0)
self.file_type = 'csv'
self.loaded = True
self.page = 1
self.pages = self.record_length / (self.page_lines * self.line_length)
self.remainder = -1
self.remainder_amplitude = 0.0
self.remainder_length = self.line_length / 20.0
self.update()
def onFileSaveAs(self):
"""
"""
self.out_filename, filt = QFileDialog.getSaveFileName(self,
"Save File As...", "",
"Consam file (*.ssd)")
if self.file_type == 'ssd':
if self.filtered:
self.h['ilen'] = self.points_total * 2
self.h['srate'] = self.srate
self.h['filt'] = self.ffilter
self.h['idt'] = self.sample * 1e6
dcio.ssd_save(self.out_filename, self.h, self.trace)
elif self.file_type == 'abf':
h_conv = dcio.abf2ssd(self.h)
dcio.ssd_save(self.out_filename, h_conv, self.trace)
def onSliceTrace(self):
"""
"""
dialog = SliceTraceDlg(self.points_total, self)
if dialog.exec_():
first, last = dialog.return_par()
self.original_trace = self.trace
self.original_points_total = self.points_total
self.points_total = last - (first - 1)
self.trace = np.zeros(self.points_total, 'h')
self.trace = self.original_trace[first-1 : last]
self.page = 1
self.update()
def onFilterGaus(self):
"""
"""
dialog = FilterOptsDlg(self)
if dialog.exec_():
fc = dialog.return_par()
self.original_trace = self.trace
self.original_ffilter = self.ffilter
self.original_srate = self.srate
self.original_sample = self.sample
self.original_points_total = self.points_total
trace_new, srate = filter.filter_trace(self.trace,
fc, self.ffilter, self.srate)
self.trace = trace_new.copy()
self.srate = srate
self.ffilter = fc
self.sample = 1 / srate
self.points_total = self.trace.shape[0]
self.filtered = True
self.page = 1
self.update()
def onPlotOptions(self):
"""
"""
dialog = PlotPageDlg(self)
if dialog.exec_():
self.line_length, self.page_lines, self.point_every, self.line_separ = dialog.return_par()
self.pages = self.record_length / (self.page_lines * self.line_length)
self.page = 1
self.update()
def onNextPage(self):
"""
"""
if self.page < self.pages:
self.page += 1
self.update()
def onPrevPage(self):
"""
"""
if self.page > 1:
self.page -= 1
self.update()
def onPrint(self):
"""
"""
printer=QPrinter(QPrinter.HighResolution)
printer.setOrientation(QPrinter.Landscape)
printDialog=QPrintDialog(printer)
if (printDialog.exec_() == QDialog.Accepted):
self.painter.begin(printer)
self.drawSCTrace(self.painter)
self.painter.end()
def onHelpAbout(self):
"""
"""
pass
def paintEvent(self, event):
"""
"""
if self.loaded:
self.painter.begin(self)
if self.file_type == 'ssd' or self.file_type == 'abf':
self.drawSCTrace(self.painter)
elif self.file_type == 'csv':
self.drawIdealisedTrace(self.painter)
self.painter.end()
def drawIdealisedTrace(self, event):
"""
"""
average = np.average(self.amplitudes[:2])
yStartDbl = float((self.page_lines +1) * self.line_separ)
page_str = (self.filename + "; Page " + str(self.page) + " of " +
str(self.pages))
point_str = ("Seconds/line: {0:.3f}; line separation (pA): {1:.3f}".
format(self.line_length, self.line_separ))
self.painter.drawText(100, 50, page_str)
self.painter.drawText(100, 650, point_str)
for j in range(self.page_lines):
xDbl1 = 0
yDbl1 = (self.remainder_amplitude - average) + yStartDbl - (j+1)*self.line_separ
line_end = False
running_length = self.remainder_length
while (not line_end) and (self.remainder < len(self.record)-1):
if (running_length < self.line_length):
xDbl2 = running_length
yDbl2 = float((self.remainder_amplitude - average) + yStartDbl - (j+1)*self.line_separ)
self.draw_line(xDbl1, yDbl1, xDbl2, yDbl2)
self.remainder += 1
if math.isnan(self.intervals[self.remainder]):
self.remainder_amplitude = 0.0
self.remainder_length = 0.5
#self.remainder_length = self.record[self.remainder + 1, 4] - self.record[self.remainder - 1, 5]
else:
self.remainder_length = self.intervals[self.remainder]
self.remainder_amplitude = self.amplitudes[self.remainder]
running_length += self.remainder_length
xDbl1 = xDbl2
yDbl1 = float((self.remainder_amplitude - average) + yStartDbl - (j+1)*self.line_separ)
self.draw_line(xDbl1, yDbl2, xDbl1, yDbl1)
else:
xDbl2 = self.line_length
yDbl2 = float((self.remainder_amplitude - average) + yStartDbl - (j+1)*self.line_separ)
self.draw_line(xDbl1, yDbl1, xDbl2, yDbl2)
line_end = True
self.remainder_length = running_length - self.line_length
def draw_line(self, xDbl1, yDbl1, xDbl2, yDbl2):
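"""Map a single segment from data coordinates to widget pixel coordinates and draw it."""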
xMinPix = int(self.width() * 5 / 100)
xMaxPix = int(self.width() * 90 / 100)
yMaxPix = int(self.height() * 10 / 100)
yMinPix = int(self.height() * 90 / 100)
xMinDbl = float(0)
xMaxDbl = float(self.line_length)
yMinDbl = float(0)
yMaxDbl = float(self.page_lines + 2) * self.line_separ
xScaleDbl = float(xMaxPix - xMinPix) / float(xMaxDbl - xMinDbl)
yScaleDbl = float(yMaxPix - yMinPix) / float(yMaxDbl - yMinDbl)
xPix1 = xMinPix + int((xMinDbl) * xScaleDbl)
yPix1 = yMinPix + int((yMinDbl) * yScaleDbl)
xPix2 = xMinPix + int((xMaxDbl) * xScaleDbl)
yPix2 = yMinPix + int((yMaxDbl) * yScaleDbl)
xPix1 = xMinPix + int((xDbl1 - xMinDbl) * xScaleDbl)
yPix1 = yMinPix + int((yDbl1 - yMinDbl) * yScaleDbl)
xPix2 = xMinPix + int((xDbl2 - xMinDbl) * xScaleDbl)
yPix2 = yMinPix + int((yDbl2 - yMinDbl) * yScaleDbl)
self.painter.drawLine(xPix1, yPix1, xPix2, yPix2)
def drawSCTrace(self, event):
"""
"""
line_points = int(self.line_length / self.sample + 1)
page_points = line_points * self.page_lines
line_points_draw = int(line_points / self.point_every)
self.pages = self.points_total / page_points
average = np.average(self.trace[:line_points])
page_str = (self.filename + "; Page " + str(self.page) + " of " +
str(self.pages))
point_str = ("Points " + str(page_points * (self.page - 1) + 1) +
" to " + str(page_points * self.page) + " every " +
str(self.point_every) + " point(s); seconds/line: " +
str(self.line_length) + "; line separation (pA): " + str(self.line_separ))
self.painter.drawText(100, 50, page_str)
self.painter.drawText(100, 650, point_str)
xMinPix = int(self.width() * 5 / 100)
xMaxPix = int(self.width() * 90 / 100)
yMaxPix = int(self.height() * 10 / 100)
yMinPix = int(self.height() * 90 / 100)
xMinDbl = float(0)
xMaxDbl = float(self.line_length)
yMinDbl = float(0)
yMaxDbl = float(self.page_lines + 2) * self.line_separ
yStartDbl = float((self.page_lines +1) * self.line_separ)
xScaleDbl = float(xMaxPix - xMinPix) / float(xMaxDbl - xMinDbl)
yScaleDbl = float(yMaxPix - yMinPix) / float(yMaxDbl - yMinDbl)
xPix1 = xMinPix + int((xMinDbl) * xScaleDbl)
yPix1 = yMinPix + int((yMinDbl) * yScaleDbl)
xPix2 = xMinPix + int((xMaxDbl) * xScaleDbl)
yPix2 = yMinPix + int((yMaxDbl) * yScaleDbl)
for j in range(self.page_lines):
xDbl1 = 0
yDbl1 = (self.trace[0 + page_points*(self.page-1) + line_points * j] - average) * self.calfac + yStartDbl - (j+1)*self.line_separ
for i in range (line_points_draw):
xDbl2 = float((i+1) * self.sample * self.point_every)
yDbl2 = float((self.trace[0 + page_points*(self.page-1) + line_points * j + (i+1)*self.point_every] - average) * self.calfac + yStartDbl - (j+1)*self.line_separ)
xPix1 = xMinPix + int((xDbl1 - xMinDbl) * xScaleDbl)
yPix1 = yMinPix + int((yDbl1 - yMinDbl) * yScaleDbl)
xPix2 = xMinPix + int((xDbl2 - xMinDbl) * xScaleDbl)
yPix2 = yMinPix + int((yDbl2 - yMinDbl) * yScaleDbl)
self.painter.drawLine(xPix1, yPix1, xPix2, yPix2)
xDbl1 = xDbl2
yDbl1 = yDbl2
class PlotPageDlg(QDialog):
"""
Dialog to input page plotting parameters.
"""
def __init__(self, parent=None):
super(PlotPageDlg, self).__init__(parent)
self.line_length = 5 # seconds
self.page_lines = 5
self.point_every = 50
self.line_separ = 10 # pA
layoutMain = QVBoxLayout()
layoutMain.addWidget(QLabel('Plot layout options'))
layout = QHBoxLayout()
layout.addWidget(QLabel("Seconds per line:"))
self.lengthEdit = QLineEdit(unicode(self.line_length))
self.lengthEdit.setMaxLength(10)
self.connect(self.lengthEdit, SIGNAL("editingFinished()"),
self.on_par_changed)
layout.addWidget(self.lengthEdit)
layoutMain.addLayout(layout)
layout = QHBoxLayout()
layout.addWidget(QLabel("Number of lines per page:"))
self.linesEdit = QLineEdit(unicode(self.page_lines))
self.linesEdit.setMaxLength(10)
self.connect(self.linesEdit, SIGNAL("editingFinished()"),
self.on_par_changed)
layout.addWidget(self.linesEdit)
layoutMain.addLayout(layout)
layout = QHBoxLayout()
layout.addWidget(QLabel("Draw every nth point:"))
self.everyEdit = QLineEdit(unicode(self.point_every))
self.everyEdit.setMaxLength(10)
self.connect(self.everyEdit, SIGNAL("editingFinished()"),
self.on_par_changed)
layout.addWidget(self.everyEdit)
layoutMain.addLayout(layout)
layout = QHBoxLayout()
layout.addWidget(QLabel("pA between lines:"))
self.separEdit = QLineEdit(unicode(self.line_separ))
self.separEdit.setMaxLength(10)
self.connect(self.separEdit, SIGNAL("editingFinished()"),
self.on_par_changed)
layout.addWidget(self.separEdit)
layoutMain.addLayout(layout)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok|
QDialogButtonBox.Cancel)
self.connect(buttonBox, SIGNAL("accepted()"),
self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),
self, SLOT("reject()"))
layoutMain.addWidget(buttonBox)
self.setLayout(layoutMain)
self.setWindowTitle("Plot layout options...")
def on_par_changed(self):
"""
"""
self.line_length = int(self.lengthEdit.text())
self.page_lines = int(self.linesEdit.text())
self.point_every = int(self.everyEdit.text())
self.line_separ = int(self.separEdit.text())
def return_par(self):
"""
Return parameters on exit.
"""
return self.line_length, self.page_lines, self.point_every, self.line_separ
class FilterOptsDlg(QDialog):
"""
Dialog to input filter options.
"""
def __init__(self, parent=None):
super(FilterOptsDlg, self).__init__(parent)
self.filter = 1000 # Hz
layoutMain = QVBoxLayout()
layoutMain.addWidget(QLabel('Filter options:'))
layout = QHBoxLayout()
layout.addWidget(QLabel("Filter with Gaussian filter to have final fc (Hz):"))
self.filterEdit = QLineEdit(unicode(self.filter))
self.filterEdit.setMaxLength(10)
self.connect(self.filterEdit, SIGNAL("editingFinished()"),
self.on_par_changed)
layout.addWidget(self.filterEdit)
layoutMain.addLayout(layout)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok|
QDialogButtonBox.Cancel)
self.connect(buttonBox, SIGNAL("accepted()"),
self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),
self, SLOT("reject()"))
layoutMain.addWidget(buttonBox)
self.setLayout(layoutMain)
self.setWindowTitle("Filter options...")
def on_par_changed(self):
"""
"""
self.filter = int(self.filterEdit.text())
def return_par(self):
"""
Return parameters on exit.
"""
return self.filter
class SliceTraceDlg(QDialog):
"""
Dialog to input trace slice limits.
"""
def __init__(self, allpoints, parent=None):
super(SliceTraceDlg, self).__init__(parent)
self.first = 1
self.last = allpoints
layoutMain = QVBoxLayout()
layoutMain.addWidget(QLabel('Slice trace:'))
# First and last data points to be used
layout = QHBoxLayout()
layout.addWidget(QLabel("First "))
self.firstEdit = QLineEdit(unicode(self.first))
self.firstEdit.setMaxLength(10)
self.connect(self.firstEdit, SIGNAL("editingFinished()"),
self.on_par_changed)
layout.addWidget(self.firstEdit)
layout.addWidget(QLabel(" and last "))
self.lastEdit = QLineEdit(unicode(self.last))
self.lastEdit.setMaxLength(10)
self.connect(self.lastEdit, SIGNAL("editingFinished()"),
self.on_par_changed)
layout.addWidget(self.lastEdit)
layout.addWidget(QLabel(" data points to be used."))
layoutMain.addLayout(layout)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok|
QDialogButtonBox.Cancel)
self.connect(buttonBox, SIGNAL("accepted()"),
self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),
self, SLOT("reject()"))
layoutMain.addWidget(buttonBox)
self.setLayout(layoutMain)
self.setWindowTitle("Trace slice...")
def on_par_changed(self):
"""
"""
self.first = int(self.firstEdit.text())
self.last = int(self.lastEdit.text())
def return_par(self):
"""
Return parameters on exit.
"""
return self.first, self.last
| gpl-2.0 |
PrashntS/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
idlead/scikit-learn | examples/exercises/plot_cv_diabetes.py | 19 | 2613 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas)
k_fold = KFold(3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
lbishal/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
| bsd-3-clause |
nicoddemus/backtrader | backtrader/plot/plot.py | 1 | 24950 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import bisect
import collections
try:
from collections import OrderedDict
except ImportError:
from ..utils.ordereddict import OrderedDict
import math
import six
from six.moves import xrange
import matplotlib.dates as mdates
import matplotlib.font_manager as mfontmgr
import matplotlib.legend as mlegend
import matplotlib.pyplot as mpyplot
import matplotlib.ticker as mticker
from .. import AutoInfoClass, MetaParams, TimeFrame
from .finance import plot_candlestick, plot_ohlc, plot_volume, plot_lineonclose
from .formatters import (MyVolFormatter, MyDateFormatter, getlocator)
from .scheme import PlotScheme
from .utils import tag_box_style
from .multicursor import MultiCursor
class PInfo(object):
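"""Bookkeeping for one plotting run: figures, axes, colors, z-order and shared-x state."""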
def __init__(self, sch):
self.sch = sch
self.nrows = 0
self.row = 0
self.clock = None
self.x = None
self.xlen = 0
self.sharex = None
self.figs = list()
self.cursors = list()
self.daxis = OrderedDict()
self.ldaxis = list()
self.zorder = dict()
self.coloridx = collections.defaultdict(lambda: -1)
self.prop = mfontmgr.FontProperties(size=self.sch.subtxtsize)
def newfig(self, numfig):
fig = mpyplot.figure(numfig)
self.figs.append(fig)
self.daxis = OrderedDict()
self.ldaxis.append(self.daxis)
self.row = 0
self.sharex = None
return fig
def nextcolor(self, ax):
self.coloridx[ax] += 1
return self.coloridx[ax]
def color(self, ax):
return self.sch.color(self.coloridx[ax])
def zordernext(self, ax):
z = self.zorder[ax]
if self.sch.zdown:
return z * 0.9999
return z * 1.0001
def zordercur(self, ax):
return self.zorder[ax]
class Plot(six.with_metaclass(MetaParams, object)):
params = (('scheme', PlotScheme()),)
def __init__(self, **kwargs):
for pname, pvalue in kwargs.items():
setattr(self.p.scheme, pname, pvalue)
def drawtag(self, ax, x, y, facecolor, edgecolor, alpha=0.9, **kwargs):
txt = ax.text(x, y, '%.2f' % y, va='center', ha='left',
fontsize=self.pinf.sch.subtxtsize,
bbox=dict(boxstyle=tag_box_style,
facecolor=facecolor,
edgecolor=edgecolor,
alpha=alpha),
# 3.0 is the minimum default for text
zorder=self.pinf.zorder[ax] + 3.0,
**kwargs)
def plot(self, strategy, numfigs=1):
if not strategy.datas:
return
self.pinf = PInfo(self.p.scheme)
self.sortdataindicators(strategy)
self.calcrows(strategy)
slen = len(strategy)
d, m = divmod(slen, numfigs)
pranges = list()
for i in xrange(numfigs):
a = d * i
if i == (numfigs - 1):
d += m # add remainder to last stint
b = a + d
pranges.append([a, b, d])
for numfig in xrange(numfigs):
# prepare a figure
fig = self.pinf.newfig(numfig)
self.pinf.pstart, self.pinf.pend, self.pinf.psize = pranges[numfig]
self.pinf.xstart = self.pinf.pstart
self.pinf.xend = self.pinf.pend
self.pinf.clock = strategy._clock
self.pinf.xreal = strategy._clock.datetime.plot(
self.pinf.pstart, self.pinf.psize)
self.pinf.xlen = len(self.pinf.xreal)
self.pinf.x = list(xrange(self.pinf.xlen))
# Do the plotting
# Things that go always at the top (observers)
for ptop in self.dplotstop:
self.plotind(ptop, subinds=self.dplotsover[ptop])
# Create the rest on a per data basis
for data in strategy.datas:
for ind in self.dplotsup[data]:
self.plotind(
ind,
subinds=self.dplotsover[ind],
upinds=self.dplotsup[ind],
downinds=self.dplotsdown[ind])
self.plotdata(data, self.dplotsover[data])
for ind in self.dplotsdown[data]:
self.plotind(
ind,
subinds=self.dplotsover[ind],
upinds=self.dplotsup[ind],
downinds=self.dplotsdown[ind])
cursor = MultiCursor(
fig.canvas, list(self.pinf.daxis.values()),
useblit=True, horizOn=True, vertOn=True,
horizShared=True, vertShared=False,
horizMulti=True, vertMulti=False,
color='black', lw=1, ls=':')
self.pinf.cursors.append(cursor)
lastax = list(self.pinf.daxis.values())[-1]
# Date formatting for the x axis - only the last one needs it
if False:
locator = mticker.AutoLocator()
lastax.xaxis.set_major_locator(locator)
# lastax.xaxis.set_major_formatter(MyDateFormatter(self.pinf.xreal))
formatter = mdates.IndexDateFormatter(self.pinf.xreal,
fmt='%Y-%m-%d')
lastax.xaxis.set_major_formatter(formatter)
else:
self.setlocators(strategy._clock)
# Put the subplots as indicated by hspace
fig.subplots_adjust(hspace=self.pinf.sch.plotdist,
top=0.98, left=0.05, bottom=0.05, right=0.95)
# Applying fig.autofmt_xdate if the data axis is the last one
# breaks the presentation of the date labels. why?
# Applying the manual rotation with setp cures the problem
# but the labels from all axis but the last have to be hidden
if False:
fig.autofmt_xdate(bottom=0.25, rotation=0)
elif True:
for ax in self.pinf.daxis.values():
mpyplot.setp(ax.get_xticklabels(), visible=False)
# ax.autoscale_view(tight=True)
mpyplot.setp(lastax.get_xticklabels(),
visible=True,
rotation=self.pinf.sch.tickrotation)
# Things must be tight along the x axis (to fill both ends)
axtight = 'x' if not self.pinf.sch.ytight else 'both'
mpyplot.autoscale(enable=True, axis=axtight, tight=True)
def setlocators(self, data):
ax = list(self.pinf.daxis.values())[-1]
comp = getattr(data, '_compression', 1)
tframe = getattr(data, '_timeframe', TimeFrame.Days)
if tframe == TimeFrame.Years:
fmtmajor = '%Y'
fmtminor = '%Y'
fmtdata = '%Y'
elif tframe == TimeFrame.Months:
fmtmajor = '%Y'
fmtminor = '%b'
fmtdata = '%b'
elif tframe == TimeFrame.Weeks:
fmtmajor = '%b'
fmtminor = '%d'
fmtdata = '%d'
elif tframe == TimeFrame.Days:
fmtmajor = '%b'
fmtminor = '%d'
fmtdata = '%Y-%m-%d'
elif tframe == TimeFrame.Minutes:
fmtmajor = '%d %b'
fmtminor = '%H:%M'
fmtdata = '%Y-%m-%d %H:%M'
fordata = mdates.IndexDateFormatter(self.pinf.xreal, fmt=fmtdata)
for dax in self.pinf.daxis.values():
dax.fmt_xdata = fordata
locmajor = mticker.AutoLocator()
locminor = mticker.AutoMinorLocator()
ax.xaxis.set_minor_locator(locminor)
ax.xaxis.set_major_locator(locmajor)
formajor = mdates.IndexDateFormatter(self.pinf.xreal, fmt=fmtmajor)
forminor = mdates.IndexDateFormatter(self.pinf.xreal, fmt=fmtminor)
ax.xaxis.set_minor_formatter(forminor)
ax.xaxis.set_major_formatter(formajor)
def calcrows(self, strategy):
# Calculate the total number of rows
rowsmajor = self.pinf.sch.rowsmajor
rowsminor = self.pinf.sch.rowsminor
nrows = 0
# Datas and volumes
nrows += len(strategy.datas) * rowsmajor
if self.pinf.sch.volume and not self.pinf.sch.voloverlay:
nrows += len(strategy.datas) * rowsminor
# top indicators/observers
nrows += len(self.dplotstop) * rowsminor
# indicators above datas
nrows += sum(len(v) for v in self.dplotsup.values())
nrows += sum(len(v) for v in self.dplotsdown.values())
self.pinf.nrows = nrows
def newaxis(self, obj, rowspan):
ax = mpyplot.subplot2grid((self.pinf.nrows, 1), (self.pinf.row, 0),
rowspan=rowspan, sharex=self.pinf.sharex)
# update the sharex information if not available
if self.pinf.sharex is None:
self.pinf.sharex = ax
# update the row index with the taken rows
self.pinf.row += rowspan
# save the mapping indicator - axis and return
self.pinf.daxis[obj] = ax
# Activate grid in all axes if requested
ax.yaxis.tick_right()
ax.grid(self.pinf.sch.grid, which='both')
return ax
def plotind(self, ind,
subinds=None, upinds=None, downinds=None,
masterax=None):
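# Plot one indicator/observer: one curve per plot line, a tag with the last value,
# an optional legend, and any sub-indicators above/below or overlaid on the same axis.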
ind._plotinit()
sch = self.p.scheme
# check subind
subinds = subinds or []
upinds = upinds or []
downinds = downinds or []
# plot subindicators on self with independent axis above
for upind in upinds:
self.plotind(upind)
# Get an axis for this plot
ax = masterax or self.newaxis(ind, rowspan=self.pinf.sch.rowsminor)
indlabel = ind.plotlabel()
for lineidx in range(ind.size()):
line = ind.lines[lineidx]
linealias = ind.lines._getlinealias(lineidx)
lineplotinfo = getattr(ind.plotlines, '_%d' % lineidx, None)
if not lineplotinfo:
lineplotinfo = getattr(ind.plotlines, linealias, None)
if not lineplotinfo:
lineplotinfo = AutoInfoClass()
if lineplotinfo._get('_plotskip', False):
continue
# Legend label only when plotting 1st line
if masterax and not ind.plotinfo.plotlinelabels:
label = indlabel * (lineidx == 0) or '_nolegend'
else:
label = lineplotinfo._get('_name', '') or linealias
# plot data
lplot = line.plotrange(self.pinf.xstart, self.pinf.xend)
if not math.isnan(lplot[-1]):
label += ' %.2f' % lplot[-1]
plotkwargs = dict()
linekwargs = lineplotinfo._getkwargs(skip_=True)
if linekwargs.get('color', None) is None:
if not lineplotinfo._get('_samecolor', False):
self.pinf.nextcolor(ax)
plotkwargs['color'] = self.pinf.color(ax)
plotkwargs.update(dict(aa=True, label=label))
plotkwargs.update(**linekwargs)
if ax in self.pinf.zorder:
plotkwargs['zorder'] = self.pinf.zordernext(ax)
pltmethod = getattr(ax, lineplotinfo._get('_method', 'plot'))
plottedline = pltmethod(self.pinf.x, lplot, **plotkwargs)
try:
plottedline = plottedline[0]
except:
# Possibly a container of artists (when plotting bars)
pass
self.pinf.zorder[ax] = plottedline.get_zorder()
if not math.isnan(lplot[-1]):
# line has valid values, plot a tag for the last value
self.drawtag(ax, len(self.pinf.xreal), lplot[-1],
facecolor='white',
edgecolor=self.pinf.color(ax))
# plot subindicators that were created on self
for subind in subinds:
self.plotind(subind, subinds=self.dplotsover[subind], masterax=ax)
if not masterax:
# adjust margin if requested ... general of particular
ymargin = ind.plotinfo._get('plotymargin', 0.0)
ymargin = max(ymargin, self.pinf.sch.yadjust)
if ymargin:
ax.margins(y=ymargin)
# Set specific or generic ticks
yticks = ind.plotinfo._get('plotyticks', [])
if not yticks:
yticks = ind.plotinfo._get('plotyhlines', [])
if yticks:
ax.set_yticks(yticks)
else:
locator = mticker.MaxNLocator(nbins=4, prune='both')
ax.yaxis.set_major_locator(locator)
# Set specific hlines if asked to
hlines = ind.plotinfo._get('plothlines', [])
if not hlines:
hlines = ind.plotinfo._get('plotyhlines', [])
for hline in hlines:
ax.axhline(hline, color=self.pinf.sch.hlinescolor,
ls=self.pinf.sch.hlinesstyle,
lw=self.pinf.sch.hlineswidth)
if self.pinf.sch.legendind and \
ind.plotinfo._get('plotlegend', True):
handles, labels = ax.get_legend_handles_labels()
# Ensure that we have something to show
if labels:
# Legend done here to ensure it includes all plots
legend = ax.legend(loc=self.pinf.sch.legendindloc,
numpoints=1, frameon=False,
shadow=False, fancybox=False,
prop=self.pinf.prop)
legend.set_title(indlabel, prop=self.pinf.prop)
# hack: if title is set. legend has a Vbox for the labels
# which has a default "center" set
legend._legend_box.align = 'left'
# plot subindicators on self with independent axis below
for downind in downinds:
self.plotind(downind)
def plotvolume(self, data, opens, highs, lows, closes, volumes, label):
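# Plot volume, either overlaid on the data axis (scaled down via volscaling) or in its
# own subplot; returns the volume artist, or None if there is no volume to plot.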
if self.pinf.sch.voloverlay:
rowspan = self.pinf.sch.rowsmajor
else:
rowspan = self.pinf.sch.rowsminor
ax = self.newaxis(data.volume, rowspan=rowspan)
if self.pinf.sch.voloverlay:
volalpha = self.pinf.sch.voltrans
else:
volalpha = 1.0
maxvol = volylim = max(volumes)
if maxvol:
# Plot the volume (no matter if as overlay or standalone)
vollabel = label
volplot, = plot_volume(ax, self.pinf.x, opens, closes, volumes,
colorup=self.pinf.sch.volup,
colordown=self.pinf.sch.voldown,
alpha=volalpha, label=vollabel)
nbins = 6
prune = 'both'
if self.pinf.sch.voloverlay:
# store for a potential plot over it
nbins = int(nbins / self.pinf.sch.volscaling)
prune = None
volylim /= self.pinf.sch.volscaling
ax.set_ylim(0, volylim, auto=True)
else:
# plot a legend
handles, labels = ax.get_legend_handles_labels()
if handles:
# Legend done here to ensure it includes all plots
legend = ax.legend(loc=self.pinf.sch.legendindloc,
numpoints=1, frameon=False,
shadow=False, fancybox=False,
prop=self.pinf.prop)
locator = mticker.MaxNLocator(nbins=nbins, prune=prune)
ax.yaxis.set_major_locator(locator)
ax.yaxis.set_major_formatter(MyVolFormatter(maxvol))
if not maxvol:
ax.set_yticks([])
return None
return volplot
def setxdata(self, data):
# only if this data has a master, do something
if data.mlen:
# this data has a master, get the real length of this data
self.pinf.xlen = len(data.mlen)
# find the starting point with regards to master start: pstart
self.pinf.xstart = bisect.bisect_left(
data.mlen, self.pinf.pstart)
# find the ending point with regards to master start: pend
self.pinf.xend = bisect.bisect_right(
data.mlen, self.pinf.pend)
# extract the Xs from the subdata
self.pinf.x = data.mlen[self.pinf.xstart:self.pinf.xend]
# rebase the Xs to the start of the main data point
self.pinf.x = [x - self.pinf.pstart for x in self.pinf.x]
def plotdata(self, data, indicators):
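# Plot a single data feed: indicators above it, the price plot (line/candlestick/ohlc bars),
# volume, overlaid indicators and the legend, and finally the indicators below it.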
for ind in indicators:
upinds = self.dplotsup[ind]
for upind in upinds:
self.plotind(upind,
subinds=self.dplotsover[upind],
upinds=self.dplotsup[upind],
downinds=self.dplotsdown[upind])
# set the x axis data (if needed)
self.setxdata(data)
opens = data.open.plotrange(self.pinf.xstart, self.pinf.xend)
highs = data.high.plotrange(self.pinf.xstart, self.pinf.xend)
lows = data.low.plotrange(self.pinf.xstart, self.pinf.xend)
closes = data.close.plotrange(self.pinf.xstart, self.pinf.xend)
volumes = data.volume.plotrange(self.pinf.xstart, self.pinf.xend)
vollabel = 'Volume'
if self.pinf.sch.volume and self.pinf.sch.voloverlay:
volplot = self.plotvolume(
data, opens, highs, lows, closes, volumes, vollabel)
axvol = self.pinf.daxis[data.volume]
ax = axvol.twinx()
self.pinf.daxis[data] = ax
else:
ax = self.newaxis(data, rowspan=self.pinf.sch.rowsmajor)
datalabel = ''
dataname = ''
if hasattr(data, '_name') and data._name:
datalabel += data._name
if hasattr(data, '_compression') and \
hasattr(data, '_timeframe'):
tfname = TimeFrame.getname(data._timeframe, data._compression)
datalabel += ' (%d %s)' % (data._compression, tfname)
datalabel += ' O:%.2f H:%2.f L:%.2f C:%.2f' % \
(opens[-1], highs[-1], lows[-1], closes[-1])
if self.pinf.sch.style.startswith('line'):
plotted = plot_lineonclose(
ax, self.pinf.x, closes,
color=self.pinf.sch.loc, label=datalabel)
else:
if self.pinf.sch.style.startswith('candle'):
plotted = plot_candlestick(
ax, self.pinf.x, opens, highs, lows, closes,
colorup=self.pinf.sch.barup,
colordown=self.pinf.sch.bardown,
label=datalabel)
elif self.pinf.sch.style.startswith('bar') or True:
# final default option -- should be "else"
plotted = plot_ohlc(
ax, self.pinf.x, opens, highs, lows, closes,
colorup=self.pinf.sch.barup,
colordown=self.pinf.sch.bardown,
label=datalabel)
self.pinf.zorder[ax] = plotted[0].get_zorder()
# Code to place a label at the right hand side with the last value
self.drawtag(ax, len(self.pinf.xreal), closes[-1],
facecolor='white', edgecolor=self.pinf.sch.loc)
ax.yaxis.set_major_locator(mticker.MaxNLocator(prune='both'))
# make sure "over" indicators do not change our scale
ax.set_ylim(ax.get_ylim())
if self.pinf.sch.volume:
if not self.pinf.sch.voloverlay:
self.plotvolume(
data, opens, highs, lows, closes, volumes, vollabel)
else:
# Prepare overlay scaling/pushup or manage own axis
if self.pinf.sch.volpushup:
# push up overlaid axis by lowering the bottom limit
axbot, axtop = ax.get_ylim()
axbot *= (1.0 - self.pinf.sch.volpushup)
ax.set_ylim(axbot, axtop)
for ind in indicators:
self.plotind(ind, subinds=self.dplotsover[ind], masterax=ax)
handles, labels = ax.get_legend_handles_labels()
if handles:
# put data and volume legend entries in the 1st positions
# because they are "collections" they are considered after Line2D
# for the legend entries, which is not our desire
if self.pinf.sch.volume and self.pinf.sch.voloverlay:
if volplot:
# even if volume plot was requested, there may be no volume
labels.insert(0, vollabel)
handles.insert(0, volplot)
didx = labels.index(datalabel)
labels.insert(0, labels.pop(didx))
handles.insert(0, handles.pop(didx))
# feed handles/labels to legend to get right order
legend = ax.legend(handles, labels,
loc='upper left', frameon=False, shadow=False,
fancybox=False,
prop=self.pinf.prop, numpoints=1, ncol=1)
# hack: if title is set. legend has a Vbox for the labels
# which has a default "center" set
legend._legend_box.align = 'left'
for ind in indicators:
downinds = self.dplotsdown[ind]
for downind in downinds:
self.plotind(downind,
subinds=self.dplotsover[downind],
upinds=self.dplotsup[downind],
downinds=self.dplotsdown[downind])
def show(self):
mpyplot.show()
def sortdataindicators(self, strategy):
# These lists/dictionaries hold the subplots that go above each data
self.dplotstop = list()
self.dplotsup = collections.defaultdict(list)
self.dplotsdown = collections.defaultdict(list)
self.dplotsover = collections.defaultdict(list)
# Sort observers in the different lists/dictionaries
for x in strategy.getobservers():
if not x.plotinfo.plot or x.plotinfo.plotskip:
continue
if x.plotinfo.subplot:
self.dplotstop.append(x)
else:
key = getattr(x._clock, 'owner', x._clock)
self.dplotsover[key].append(x)
# Sort indicators in the different lists/dictionaries
for x in strategy.getindicators():
if not hasattr(x, 'plotinfo'):
# no plotting support - so far LineSingle derived classes
continue
if not x.plotinfo.plot or x.plotinfo.plotskip:
continue
# support LineSeriesStub which has "owner" to point to the data
key = getattr(x._clock, 'owner', x._clock)
if getattr(x.plotinfo, 'plotforce', False):
if key not in strategy.datas:
datas = strategy.datas
while True:
if key not in strategy.datas:
key = key._clock
else:
break
if x.plotinfo.subplot:
if x.plotinfo.plotabove:
self.dplotsup[key].append(x)
else:
self.dplotsdown[key].append(x)
else:
self.dplotsover[key].append(x)
| gpl-3.0 |
mjabri/holoviews | tests/testrenderclass.py | 1 | 3450 | # -*- coding: utf-8 -*-
"""
Test cases for rendering exporters
"""
from hashlib import sha256
from unittest import SkipTest
import numpy as np
from holoviews.plotting.mpl.renderer import MPLRenderer
from holoviews import HoloMap, Image, ItemTable
from holoviews.element.comparison import ComparisonTestCase
from nose.plugins.attrib import attr
try:
# Standardize backend due to random inconsistencies
from matplotlib import pyplot
pyplot.switch_backend('agg')
except:
pyplot = None
def digest_data(data):
hashfn = sha256()
hashfn.update(data)
return hashfn.hexdigest()
@attr(optional=1)
class MPLRendererTest(ComparisonTestCase):
"""
Note: it is not possible to compare the hashes of SVG and WebM formats
as the hashes are not stable across exports.
"""
def setUp(self):
if pyplot is None:
raise SkipTest("Matplotlib required to test widgets")
self.basename = 'no-file'
self.image1 = Image(np.array([[0,1],[2,3]]), label='Image1')
self.image2 = Image(np.array([[1,0],[4,-2]]), label='Image2')
self.map1 = HoloMap({1:self.image1, 2:self.image2}, label='TestMap')
self.unicode_table = ItemTable([('β','Δ1'), ('°C', '3×4')],
label='Poincaré', group='α Festkörperphysik')
self.renderer = MPLRenderer.instance()
def test_simple_export_gif(self):
data = self.renderer(self.map1, fmt='gif')[0]
self.assertEqual(digest_data(data),
'95258c17d10620f20604c9cbd17e6b65e886a6163c96d6574f3eb812e0f149c2')
def test_simple_export_gif_double_size(self):
data = self.renderer.instance(size=200)(self.map1, fmt='gif')[0]
self.assertEqual(digest_data(data),
'fbe6d753df1471315cbd83d370379591af0fdea114601c5ce1a615777749ca91')
def test_simple_export_gif_half_fps(self):
data = self.renderer.instance(fps=5)(self.map1, fmt='gif', )[0]
self.assertEqual(digest_data(data),
'add756aa3caeb4c5f2396cdd5bd0122128c6a1275de9d3a44a0c21a734c4d5f4')
def test_simple_export_png1(self):
data = self.renderer(self.image1, fmt='png')[0]
self.assertEqual(digest_data(data),
'08b7d97e79f715f9d8593416b1f6561ebe3e75bb038172ffd6048286ab09e671')
def test_simple_export_png1_double_size(self):
data = self.renderer.instance(size=200)(self.image1, fmt='png')[0]
self.assertEqual(digest_data(data),
'0b48a3e366b300fcfea3cba47d6a07631a8fcc02a96860f1291233ef2c976764')
def test_simple_export_png2(self):
data = self.renderer(self.image2, fmt='png')[0]
self.assertEqual(digest_data(data),
'2b5f1639584cd4c18c01cfb3f26d26dfa582fff39b869223269c0d941f17cc8b')
def test_simple_export_png2_double_size(self):
data = self.renderer.instance(size=200)(self.image2, fmt='png')[0]
self.assertEqual(digest_data(data),
'ef8e2df9c3d3a27e738ae1ca9f5b0704b6467cb44265f7933a3c137ce8a8a519')
def test_simple_export_unicode_table_png(self):
"Test that unicode support and rendering is working"
data = self.renderer.instance(size=200)(self.unicode_table, fmt='png')[0]
self.assertEqual(digest_data(data),
'a3dd68a888de14064cb621c14be5b175d96781cdbc932a3f778def34beaee1ff')
| bsd-3-clause |
mueller-lab/PyFRAP | pyfrp/subclasses/pyfrp_fit.py | 2 | 30354 | #=====================================================================================================================================
#Copyright
#=====================================================================================================================================
#Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society
#This software is distributed under the terms of the GNU General Public License.
#This file is part of PyFRAP.
#PyFRAP is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#===========================================================================================================================================================================
#Module Description
#===========================================================================================================================================================================
"""Essential PyFRAP module containing :py:class:`pyfrp.subclasses.pyfrp_fit.fit` class.
"""
#===========================================================================================================================================================================
#Improting necessary modules
#===========================================================================================================================================================================
#Numpy/Scipy
import numpy as np
#PyFRAP Modules
from pyfrp.modules import pyfrp_misc_module
from pyfrp.modules import pyfrp_plot_module
from pyfrp.modules import pyfrp_fit_module
from pyfrp.modules import pyfrp_stats_module
from pyfrp.modules.pyfrp_term_module import *
#Time
import time
#===========================================================================================================================================================================
#Class definitions
#===========================================================================================================================================================================
class fit:
"""Main fit class of PyFRAP.
The purpose of the fit class is to save all attributes used for fitting PyFRAP simulation results to
data analysis results. The main attributes are:
* Fitting algorithm specifics:
* Fitting algorithm, see also :py:func:`setOptMeth`.
* Stopping criteria, see also :py:func:`setMaxfun` and :py:func:`setOptTol`.
* Initial guess, see also :py:func:`getX0`.
* Boundaries, see also :py:func:`getBounds`.
* Fitting options:
* ``fitProd``, see also :py:func:`getFitProd`.
* ``fitDegr``, see also :py:func:`getFitDegr`.
* ``fitPinned``, see also :py:func:`getFitPinned`.
* ``equOn``, see also :py:func:`getEqu`.
* ``fitCutOffT``, see also :py:func:`getFitCutOffT`.
* The ROIs to be fitted, see also :py:func:`getROIsFitted`.
* Fitting results, see also :py:func:`printResults`.
* Fitted vectors.
The most important methods are:
* :py:func:`run`: Runs fitting.
* :py:func:`addROIByName`: Adds ROI to be used for fitting.
* :py:func:`getX0`: Builds and returns current initial guess.
* :py:func:`getBounds`: Builds and returns current bounds.
* :py:func:`computeStats`: Computes post-fitting statistics.
The fit uses the simulation and data vectors stored in all :py:class:`pyfrp.subclasses.pyfrp_ROI.ROI` objects defined in
the ``ROIsFitted`` list to compute the optimal value of ``DOptMu`` (and of ``prodOpt`` or ``degrOpt`` if ``fitProd`` or ``fitDegr``
is selected, respectively).
After calling :py:func:`run`, proper ``x0`` and bounds are computed automatically via :py:func:`getX0` and :py:func:`getBounds`.
Args:
embryo (pyfrp.subclasses.pyfrp_embryo.embryo): Embryo object that fit belongs to.
name (str): Name of fit.
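Example (minimal sketch; assumes ``emb`` is an embryo object that already has data analysis
and simulation results, and the ROI name is hypothetical):

>>> from pyfrp.subclasses.pyfrp_fit import fit
>>> f = fit(emb,"testFit")
>>> f.addROIByName("Bleached Square")
>>> f = f.run(debug=True)
>>> f.printResults()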
"""
#Create new fit object
def __init__(self,embryo,name):
#General Settings
self.name=name
self.embryo=embryo
#Optimization algorithm settings
self.optMeth="Constrained Nelder-Mead"
self.maxfun=1000
self.optTol=1e-10
#Dataseries selection
self.ROIsFitted=[]
#What parameters to fit
self.fitProd=False
self.fitDegr=False
#Equalization and pinning
self.equOn=True
self.fitPinned=True
self.equFacts=[]
self.LBEqu=0.1
self.UBEqu=3.
#Intial guess
self.x0=[10,0,0.]
#Bounds
self.LBProd=0.
self.UBProd=100.
self.LBDegr=0.
self.UBDegr=100.
self.LBD=0.01
self.UBD=300.
self.bounds=None
#More settings
self.kineticTimeScale=1.
self.bruteInitD=False
#Cutting tvec option
self.fitCutOffT=False
self.cutOffT=150
self.cutOffStepSim=self.embryo.simulation.stepsSim
self.cutOffStepData=self.embryo.nFrames
#Fit tracking
self.saveTrack=0
self.trackedParms=[]
self.trackedFits=[]
#Fitted Vectors
self.fittedVecs=[]
self.tvecFit=None
self.dataVecsFitted=[]
#Results
self.SSD=10000000
self.DOptMu=None
self.DOptPx=None
self.prodOpt=None
self.degrOpt=None
self.success=None
self.iterations=None
self.fcalls=None
#Statistics
self.Rsq=None
self.MeanRsq=None
self.RsqByROI={}
#Empty result dataseries
self.tvecFit=embryo.tvecData
def addROI(self,r):
"""Adds ROI to the list of fitted ROIs.
Args:
r (pyfrp.subclasses.pyfrp_ROI.ROI): ROI to be used for fitting.
Returns:
list: Updated list of ROIs used for fitting.
"""
if r not in self.ROIsFitted:
self.ROIsFitted.append(r)
self.x0.append(1.)
return self.ROIsFitted
def addROIByName(self,name):
"""Adds ROI to the list of fitted ROIs, given a specific name.
Args:
name (str): Name of ROI to be used for fitting.
Returns:
list: Updated list of ROIs used for fitting.
"""
r=self.embryo.getROIByName(name)
return self.addROI(r)
def addROIById(self,Id):
"""Adds ROI to the list of fitted ROIs, given a specific ROI Id.
Args:
Id (int): Id of ROI to be used for fitting.
Returns:
list: Updated list of ROIs used for fitting.
"""
r=self.embryo.getROIById(Id)
return self.addROI(r)
def getROIsFitted(self):
"""Returns list of ROIs used for fitting.
Returns:
list: list of ROIs used for fitting.
"""
return self.ROIsFitted
def removeROI(self,r):
"""Removes ROI from the list of fitted ROIs.
Args:
r (pyfrp.subclasses.pyfrp_ROI.ROI): ROI to be removed.
Returns:
list: Updated list of ROIs used for fitting.
"""
if r in self.ROIsFitted:
idx=self.ROIsFitted.index(r)
self.x0.pop(3+idx)
self.ROIsFitted.remove(r)
return self.ROIsFitted
def getX0(self):
"""Returns initial guess of fit in the form that is useful for
the call of the optimization algorithm.
Copies ``x0`` into a local variable to pass to the solver and pops entries that are
currently not needed because they are turned off via ``fitProd`` or ``fitDegr``.
Always appends the initial guesses for the equalization factors, even though they might not be used.
.. note:: Always gets executed at the start of ``run``.
Returns:
list: Currently used x0.
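For example (sketch): with ``fitProd=False``, ``fitDegr=False`` and two fitted ROIs, the
returned guess is ``[D0, equFact1, equFact2]``, i.e. the diffusion guess followed by one
equalization factor per ROI.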
"""
x0=list(self.x0)
if self.fitProd and self.fitDegr:
pass
elif self.fitProd and not self.fitDegr:
x0.pop(2)
elif not self.fitProd and self.fitDegr:
x0.pop(1)
elif not self.fitProd and not self.fitDegr:
x0=list(self.x0)
x0.pop(2)
x0.pop(1)
return x0
def reset2DefaultX0(self):
"""Resets initial guess x0 to its default form.
The default form of x0 is
>>> [10., 0., 0., 1., 1., 1.]
The last entries are the initial guesses for the equalization factors and are set to a
list of ones with the same length as ``ROIsFitted``.
Returns:
list: New initial guess x0.
"""
equFacts=len(self.ROIsFitted)*[1.]
self.x0=[10,0,0]+equFacts
return self.x0
def getBounds(self):
"""Generates tuple of boundary tuples, limiting parameters
varied during SSD minimization.
Will generate exactly the boundary tuple that the optimization algorithm currently
needs, meaning that only bounds for parameters that are turned on via ``fitProd`` or ``fitDegr``
will be included in the tuple.
Will use values that are stored in ``LBx`` and ``UBx``, where ``x`` is
``D``, ``Prod``, or ``Degr`` for the creation of the tuples.
Will also add a tuple of bounds defined via ``LBEqu`` and ``UBEqu`` for each
ROI in ``ROIsFitted``.
.. note:: Always gets executed at the start of ``run``.
Returns:
tuple: Boundary value tuple.
"""
if self.fitProd and self.fitDegr:
bnds = [(self.LBD, self.UBD), (self.LBProd, self.UBProd),(self.LBDegr,self.UBDegr)]
ranges=[slice(self.LBD,self.UBD,1),slice(self.LBProd,self.UBProd,10),slice(self.LBDegr,self.UBDegr,10)]
elif self.fitProd and not self.fitDegr:
bnds = [(self.LBD, self.UBD), (self.LBProd, self.UBProd)]
ranges=[slice(self.LBD,self.UBD,1),slice(self.LBProd,self.UBProd,10)]
elif not self.fitProd and self.fitDegr:
bnds = [(self.LBD, self.UBD), (self.LBDegr, self.UBDegr)]
ranges=[slice(self.LBD,self.UBD,1),slice(self.LBDegr,self.UBDegr,10)]
elif not self.fitProd and not self.fitDegr:
bnds = [(self.LBD, self.UBD),]
ranges=[slice(self.LBD,self.UBD,1)]
bnds=bnds+len(self.ROIsFitted)*[(self.LBEqu,self.UBEqu)]
ranges=ranges+len(self.ROIsFitted)*[slice(self.LBEqu,self.UBEqu,0.2)]
if self.optMeth=='brute':
self.bounds=tuple(ranges)
else:
self.bounds=tuple(bnds)
return self.bounds
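# Illustrative note (not part of the original source): with fitProd=False,
# fitDegr=False, two fitted ROIs and a gradient-based optimizer, the call
# above returns ((self.LBD, self.UBD), (self.LBEqu, self.UBEqu),
# (self.LBEqu, self.UBEqu)); with optMeth=='brute' the same configuration
# yields a tuple of slice objects for scipy.optimize.brute instead.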
def resultsToVec(self):
"""Puts results back in vector as optimization algorithm would return it.
Returns:
list: Result vector.
"""
x=[self.DOptPx]
if self.fitProd:
x.append(self.prodOpt)
if self.fitDegr:
x.append(self.degrOpt)
if self.equOn:
x=x+list(self.equFacts)
return x
def getFittedParameterNames(self):
"""Returns names of parameters that are selected for fitting.
Returns:
list: Names of parameters fitted.
"""
x=["DOptPx"]
if self.fitProd:
x.append("prod")
if self.fitDegr:
x.append("degr")
if self.equOn:
for r in self.ROIsFitted:
x.append(r.name+" equFact")
return x
def run(self,debug=False,ax=None):
"""Runs fit.
Fitting is done by passing fit object to :py:func:`pyfrp.modules.pyfrp_fit_module.FRAPFitting`.
This function then calls all necessary methods of fit to prepare it for optimization and
then passes it to the optimization algorithm.
.. note:: If ``bruteInitD`` is turned on, will execute :py:func:`runBruteInit` instead.
Keyword Args:
debug (bool): Print debugging messages.
ax (matplotlib.axes): Axes to show debugging plots in.
Returns:
pyfrp.subclasses.pyfrp_fit.fit: ``self``.
"""
if self.bruteInitD:
self.runBruteInit(debug=debug,ax=ax)
else:
self=pyfrp_fit_module.FRAPFitting(self,debug=debug,ax=ax)
return self
def runBruteInit(self,debug=False,ax=None,steps=5,x0Ds=[]):
"""Runs fit for different initial guesses of the diffusion constant D, then
selects the one that actually yielded the minimal SSD.
Initial guesses are generated with :py:func:`getBruteInitDArray` if no array ``x0Ds``
is given.
Fitting is done by passing the fit object to :py:func:`pyfrp.modules.pyfrp_fit_module.FRAPFitting`.
This function then calls all necessary methods of fit to prepare it for optimization and
then passes it to the optimization algorithm.
Will select the initial guess that yielded the minimal SSD and then rerun with this x0, making
sure that everything is updated in the fit object.
Keyword Args:
debug (bool): Print debugging messages.
ax (matplotlib.axes): Axes to show debugging plots in.
steps (int): How many initial guesses to generate.
x0Ds (list): Array with possible initial guesses for D.
Returns:
pyfrp.subclasses.pyfrp_fit.fit: ``self``.
"""
if x0Ds==[]:
x0Ds=self.getBruteInitDArray(steps=steps)
SSDs=[]
for x0D in x0Ds:
if debug:
print "Trying x0(D) = ", x0D
self.setX0D(x0D)
self=pyfrp_fit_module.FRAPFitting(self,debug=debug,ax=ax)
SSDs.append(self.SSD)
idxOpt=SSDs.index(min(SSDs))
if debug:
print "x0(D) yielding best result = ", x0Ds[idxOpt]
self.setX0D(x0Ds[idxOpt])
self=pyfrp_fit_module.FRAPFitting(self,debug=debug,ax=ax)
return self
def getBruteInitDArray(self,steps=5):
"""Generates array of different possibilities to be used as initial guess
for D.
If ``LBD`` and ``UBD`` are given, will simply divide the range between the two into ``steps`` equidistant values.
Otherwise will vary around ``x0`` by 2 orders of magnitude.
Keyword Args:
steps (int): How many initial guesses to generate.
Returns:
list: Array with possible initial guesses for D.
"""
if self.LBD!=None:
LB=self.LBD
else:
LB=0.01*self.getX0D()
if self.UBD!=None:
UB=self.UBD
else:
UB=100*self.getX0D()
x0=np.linspace(LB+1E-10,UB-1E-10,steps)
return list(x0)
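# Illustrative note (not part of the original source): with the defaults set
# in __init__ (LBD=0.01, UBD=300.) and steps=5, this returns approximately
# [0.01, 75.0, 150.0, 225.0, 300.0].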
def assignOptParms(self,res):
r"""Assigns optimal parameters found by optimization algorithm to
attributes in fit object depending on fit options chosen.
Args:
res (list): Result array from optimization algorithm.
Returns:
tuple: Tuple containing:
* DOptPx (float): Optimal diffusion coefficient in :math:`\frac{\mathrm{px}^2}{s}`.
* prod (float): Optimal production rate in :math:`\frac{[c]}{s}`.
* degr (float): Optimal degradation rate in :math:`\frac{1}{s}`.
* DOptMu (float): Optimal diffusion coefficient in :math:`\frac{\mu\mathrm{m}^2}{s}`.
"""
if self.fitProd and self.fitDegr:
self.DOptPx=res[0]
self.prodOpt=res[1]/self.kineticTimeScale
self.degrOpt=res[2]/self.kineticTimeScale
elif self.fitProd and not self.fitDegr:
self.DOptPx=res[0]
self.prodOpt=res[1]/self.kineticTimeScale
self.degrOpt=self.x0[2]/self.kineticTimeScale
elif not self.fitProd and self.fitDegr:
self.DOptPx=res[0]
self.prodOpt=self.x0[1]/self.kineticTimeScale
self.degrOpt=res[1]/self.kineticTimeScale
elif not self.fitProd and not self.fitDegr:
self.DOptPx=res[0]
self.prodOpt=self.x0[1]/self.kineticTimeScale
self.degrOpt=self.x0[2]/self.kineticTimeScale
self.DOptMu=self.DOptPx*self.embryo.convFact**2
return self.DOptPx, self.prodOpt, self.degrOpt, self.DOptMu
def plotFit(self,ax=None,legend=True,title=None,show=True):
"""Plots fit, showing the result for all fitted ROIs.
.. note:: If no ``ax`` is given, will create new one.
.. image:: ../imgs/pyfrp_fit/fit.png
Keyword Args:
ax (matplotlib.axes): Axes used for plotting.
legend (bool): Show legend.
title (str): Title of plot.
show (bool): Show plot.
Returns:
matplotlib.axes: Axes used for plotting.
"""
for r in self.ROIsFitted:
ax=r.plotFit(self,ax=ax,legend=legend,title=title,show=show)
return ax
def printResults(self):
"""Prints out main results of fit."""
printObjAttr('name',self)
printObjAttr('DOptMu',self)
printObjAttr('DOptPx',self)
printObjAttr('prodOpt',self)
printObjAttr('degrOpt',self)
printObjAttr('equFacts',self)
printObjAttr('success',self)
printObjAttr('Rsq',self)
printObjAttr('MeanRsq',self)
printObjAttr('RsqByROI',self)
return True
def printAllAttr(self):
"""Prints out all attributes of fit object."""
printAllObjAttr(self)
def resultsToDict(self):
"""Extracts all important results into dictionary, making
it easier for printout or csv extraction.
"""
parms=["DOptMu","DOptPx","prodOpt","degrOpt","success","Rsq","MeanRsq","fitDegr","fitProd","fitPinned","equOn","x0"]
dic=pyfrp_misc_module.objAttr2Dict(self,attr=parms)
roiNames=pyfrp_misc_module.objAttrToList(self.ROIsFitted,"name")
#equFacts=np.asarray(self.equFacts).astype(str)
dic["ROIsFitted"]=" , ".join(roiNames)
for i in range(len(roiNames)):
if self.equOn:
dic["equFactor "+roiNames[i]]=self.equFacts[i]
else:
dic["equFactor "+roiNames[i]]=""
dic["Rsq("+roiNames[i]+")"]=self.RsqByROI[roiNames[i]]
return dic
def setBruteInitD(self,b):
"""Turns on/off if the initial guess of for the diffusion rate D should be bruteforced.
Args:
b (bool): Flag value.
Returns:
bool: Current flag value.
"""
self.bruteInitD=b
return self.bruteInitD
def setOptMeth(self,m):
"""Sets optimization method.
Available optimization methods are:
* Constrained Nelder-Mead
* Nelder-Mead
* TNC
* L-BFGS-B
* SLSQP
* brute
* BFGS
* CG
See also http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.minimize.html and
http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.optimize.brute.html#scipy.optimize.brute .
You can find out more about the constrained Nelder-Mead algorithm in the documentation of
:py:func:`pyfrp.modules.pyfrp_optimization_module.constrObjFunc`.
Args:
m (str): New method.
"""
self.optMeth=m
return self.optMeth
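# Illustrative usage (not part of the original source):
# fit.setOptMeth('Constrained Nelder-Mead') for bounded local optimization, or
# fit.setOptMeth('brute'), which makes getBounds() build slice ranges for
# scipy.optimize.brute instead of (min, max) tuples.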
def getOptMeth(self):
"""Returns the currently used optimization algorithm.
Returns:
str: Optimization algorithm.
"""
return self.optMeth
def isFitted(self):
"""Checks if fit already has been run and succeeded.
Returns:
bool: ``True`` if success.
"""
return self.DOptMu!=None
def setEqu(self,b):
"""Turns on/off equalization.
Args:
b (bool): New flag value.
Returns:
bool: New flag value.
"""
self.equOn=b
return self.equOn
def setFitPinned(self,b):
"""Turns on/off if pinned series are supposed to be fitted.
Args:
b (bool): New flag value.
Returns:
bool: New flag value.
"""
self.fitPinned=b
return self.fitPinned
def setFitProd(self,b):
"""Turns on/off if production is supposed to be considered in fit.
Args:
b (bool): New flag value.
Returns:
bool: New flag value.
"""
self.fitProd=b
return self.fitProd
def setFitDegr(self,b):
"""Turns on/off if degradation is supposed to be considered in fit.
Args:
b (bool): New flag value.
Returns:
bool: New flag value.
"""
self.fitDegr=b
return self.fitDegr
def setSaveTrack(self,b):
"""Turns on/off if fitting process is supposed to be stored.
This can then be used to follow the convergence of
the optimization algorithm and possibly to identify local minima.
Args:
b (bool): New flag value.
Returns:
bool: New flag value.
"""
self.saveTrack=b
return self.saveTrack
def setFitCutOffT(self,b):
"""Turns on/off if only a certain fraction of the timeseries
is supposed to be fitted.
.. warning:: This option is currently VERY experimental. Fitting might
crash.
Args:
b (bool): New flag value.
Returns:
bool: New flag value.
"""
printWarning("CutOffT Option is currently VERY experimental. Fitting might crash.")
self.fitCutOffT=b
return self.fitCutOffT
def setCutOffT(self,t):
"""Sets the timepoint at which timeseries are cut if ``fitCutOffT`` is turned on."""
self.cutOffT=t
return self.cutOffT
def setMaxfun(self,m):
"""Sets maximum number of function evaluations at
which optimization algorithm stops.
Args:
m (int): New maximum number of function evaluations.
"""
self.maxfun=m
return self.maxfun
def setOptTol(self,m):
"""Sets tolerance level at which optimization algorithm stops.
Args:
m (float): New tolerance level.
"""
self.optTol=m
return self.optTol
def setLBD(self,b):
"""Sets the lower bound for the diffusion rate.
Args:
b (float): New lower bound for diffusion rate.
"""
self.LBD=b
return self.LBD
def setLBProd(self,b):
"""Sets the lower bound for the production rate.
Args:
b (float): New lower bound for production rate.
"""
self.LBProd=b
return self.LBProd
def setLBDegr(self,b):
"""Sets the lower bound for the degradation rate.
Args:
b (float): New lower bound for degradation rate.
"""
self.LBDegr=b
return self.LBDegr
def setUBD(self,b):
"""Sets the upper bound for the diffusion rate.
Args:
b (float): New upper bound for diffusion rate.
"""
self.UBD=b
return self.UBD
def setUBProd(self,b):
"""Sets the upper bound for the production rate.
Args:
b (float): New upper bound for production rate.
"""
self.UBProd=b
return self.UBProd
def setUBDegr(self,b):
"""Sets the upper bound for the degradation rate.
Args:
b (float): New upper bound for degradation rate.
"""
self.UBDegr=b
return self.UBDegr
def getEqu(self):
"""Returns equalization flag.
Returns:
bool: Current flag value.
"""
return self.equOn
def getFitPinned(self):
"""Returns flag controlling if pinned timeseries are supposed to be used
for fitting.
Returns:
bool: Current flag value.
"""
return self.fitPinned
def getFitProd(self):
"""Returns flag controlling if a production term is supposed to be used
for fitting.
Returns:
bool: Current flag value.
"""
return self.fitProd
def getFitDegr(self):
"""Returns flag controlling if a degredation term is supposed to be used
for fitting.
Returns:
bool: Current flag value.
"""
return self.fitDegr
def getSaveTrack(self):
"""Returns flag controlling if whole fitting process is supposed to be saved
in fit object.
Returns:
bool: Current flag value.
"""
return self.saveTrack
def getFitCutOffT(self):
"""Returns flag controlling if only the first ``cutOffT`` timesteps are supposed to be fitted.
.. warning:: This option is currently VERY experimental. Fitting might
crash.
Returns:
bool: Current flag value.
"""
return self.fitCutOffT
def getCutOffT(self):
"""Returns timepoint at which timeseries are cut if ``fitCutOffT`` is turned on.
.. warning:: This option is currently VERY experimental. Fitting might
crash.
Returns:
float: Timepoint.
"""
return self.cutOffT
def getMaxfun(self):
"""Returns maximum number of function evaluations at
which optimization algorithm stops.
Returns:
int: Current maximum number of function evaluations.
"""
return self.maxfun
def getOptTol(self):
"""Returns tolerance level at which optimization algorithm stops.
Returns:
float: Current tolerance level.
"""
return self.optTol
def getLBD(self):
"""Returns the lower bound for the diffusion rate.
Returns:
float: Current lower bound for diffusion rate.
"""
return self.LBD
def getLBProd(self):
"""Returns the lower bound for the production rate.
Returns:
float: Current lower bound for production rate.
"""
return self.LBProd
def getLBDegr(self):
"""Returns the lower bound for the degradation rate.
Returns:
float: Current lower bound for degradation rate.
"""
return self.LBDegr
def getUBD(self):
"""Returns the upper bound for the diffusion rate.
Returns:
float: Current upper bound for diffusion rate.
"""
return self.UBD
def getUBProd(self):
"""Returns the upper bound for the production rate.
Returns:
float: Current upper bound for production rate.
"""
return self.UBProd
def getUBDegr(self):
"""Returns the upper bound for the degradation rate.
Returns:
float: Current upper bound for degradation rate.
"""
return self.UBDegr
def setKineticTimeScale(self,s):
"""Sets the kinetic time scale factor used for fitting.
Args:
s (float): New kinetic time scale factor.
"""
self.kineticTimeScale=s
return self.kineticTimeScale
def getKineticTimeScale(self):
"""Returns the kinetic time scale factor used for fitting.
Returns:
float: Current kinetic time scale factor.
"""
return self.kineticTimeScale
def setName(self,s):
"""Sets name of fit.
Args:
s (str): New name of fit.
"""
self.name=s
return self.name
def getName(self):
"""Returns name of fit.
Returns:
str: Name of fit.
"""
return self.name
def setX0Equ(self,x):
"""Sets the initial guess for the equalization factor.
.. note:: Does this for all ROIs in ROIsFitted.
Args:
x (float): Initial guess for equalization factor.
"""
for i in range(3,len(self.x0)):
self.x0[i]=x
return self.x0
def getX0Equ(self,x):
"""Returns the initial guess for the equalization factor for
all ROIs fitted.
Returns:
list: Initial guess for equalization factor.
"""
return self.x0[3:]
def setX0D(self,x):
"""Sets the initial guess for the diffusion rate.
Args:
x (float): Initial guess for diffusion rate.
"""
self.x0[0]=x
return self.x0[0]
def setX0Prod(self,x):
"""Sets the initial guess for the production rate.
Args:
x (float): Initial guess for production rate.
"""
self.x0[1]=x
return self.x0[1]
def setX0Degr(self,x):
"""Sets the initial guess for the degradation rate.
Args:
x (float): Initial guess for degradation rate.
"""
self.x0[2]=x
return self.x0[2]
def getX0D(self):
"""Returns the initial guess for the diffusion rate.
Returns:
float: Initial guess for diffusion rate.
"""
return self.x0[0]
def getX0Prod(self):
"""Returns the initial guess for the production rate.
Returns:
float: Initial guess for production rate.
"""
return self.x0[1]
def getX0Degr(self):
"""Returns the initial guess for the degradation rate.
Returns:
float: Initial guess for degradation rate.
"""
return self.x0[2]
def setX0(self,x):
"""Sets the initial guess ``x0``.
Argument ``x`` needs to have length 3, otherwise it is rejected.
.. note:: If ``fitProd`` or ``fitDegr`` are not chosen, the values in
``x0`` are going to be used as static parameters.
Args:
x (list): New desired initial guess.
Returns:
list: New initial guess.
"""
if len(x)==3:
self.x0=x
else:
printError("Length of x0 is not 3, not going to change it.")
return self.x0
def checkPinned(self):
"""Checks if all ROIs in ``ROIsFitted`` have been pinned.
Returns:
bool: ``True`` if all ROIs have been pinned, ``False`` else.
"""
b=True
for i,r in enumerate(self.ROIsFitted):
b = b and len(self.embryo.tvecData)==len(r.dataVecPinned) and len(self.embryo.simulation.tvecSim)==len(r.simVecPinned)
return b
def checkSimulated(self):
"""Checks if all ROIs in ``ROIsFitted`` have been simulated.
Returns:
bool: ``True`` if all ROIs have been simulated, ``False`` else.
"""
b=True
for r in self.ROIsFitted:
b = b and len(r.simVec)==len(self.embryo.simulation.tvecSim)
return b
def updateVersion(self):
"""Updates fit object to current version, making sure that it possesses
all attributes.
Creates a new fit object and compares ``self`` with the new fit object.
If the new fit object has an attribute that ``self`` does not have, will
add the attribute with the default value from the new fit object.
Returns:
pyfrp.subclasses.pyfrp_fit.fit: ``self``
"""
fittemp=fit(self.embryo,"temp")
pyfrp_misc_module.updateObj(fittemp,self)
return self
def computeStats(self):
"""Computes stastics for fit.
Statistics include:
* ``MeanRsq``
* ``Rsq``
* ``RsqByROI``
"""
self=pyfrp_stats_module.computeFitRsq(self)
def printRsqByROI(self):
"""Prints out Rsq value per ROI.
"""
print "Rsq Values by ROI for fit ", self.name
printDict(self.RsqByROI)
def getNParmsFitted(self,inclEqu=True):
"""Returns the number of parameters fitted in this fit.
.. note:: If equalization is turned on, each ROI in ``ROIsFitted``
counts as an extra parameter.
Example: We fit production and equalization for 2 ROIs, then we have fitted
* D
* production
* equalization ROI 1
* equalization ROI 2
leading to 4 fitted parameters in total.
Keyword Args:
inclEqu (bool): Include equalization as additional fitted parameter.
Returns:
int: Number of parameters fitted.
"""
return 1+int(self.getFitProd())+int(self.getFitDegr())+int(inclEqu)*int(self.getEqu())*len(self.equFacts)
def plotLikehoodProfiles(self,epsPerc=0.1,steps=100,debug=False):
"""Plots likelihood profiles for all fitted parameters.
.. warning:: Since we don't yet fit the loglikelihood function, we only plot the
SSD. Even though the SSD is proportional to the loglikelihood, it should be used
carefully.
See also :py:func:`pyfrp.modules.pyfrp_fit_module.plotFitLikehoodProfiles`.
Keyword Args:
epsPerc (float): Percentage of variation.
steps (int): Number of values around optimal parameter value.
debug (bool): Show debugging messages
Returns:
list: List of matplotlib.axes objects used for plotting.
"""
axes=pyfrp_fit_module.plotFitLikehoodProfiles(self,epsPerc=epsPerc,steps=steps,debug=debug)
return axes
def setOpts(self,opts):
"""Sets a list of options.
Options are given as a dictionary and then subsequently set.
Args:
opts (dict): Options.
"""
for opt in opts:
try:
setattr(self,opt,opts[opt])
except AttributeError:
printError("Cannot set fit option " + opt +". Option does not exist.")
| gpl-3.0 |
magic2du/contact_matrix | Contact_maps/mnist_psuedo_ipython_dl_ppi/code/DL_Stacked_Model_Mnist_Psuedo_01_08_2015_02.py | 1 | 24951 |
# coding: utf-8
# In[1]:
# this part imports libs and loads the data
import sys
sys.path.append('../../../libs/')
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pickle
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
import cPickle
import gzip
import os
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import pdb, PIL
from sklearn.cross_validation import KFold
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[2]:
# set settings for this script
settings = {}
settings['fisher_mode'] = 'FisherM1'
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['SVM_RBF'] = 1
settings['SVM_POLY'] = 1
settings['DL'] = 1
settings['Log'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SAE_SVM_POLY'] = 1
settings['DL_S'] = 1
settings['SAE_S_SVM'] = 1
settings['SAE_S_SVM_RBF'] = 1
settings['SAE_S_SVM_POLY'] = 1
settings['number_iterations'] = 10
settings['finetune_lr'] = 0.1
settings['batch_size'] = 30
settings['pretraining_interations'] = 10002#10000
settings['pretrain_lr'] = 0.001
#settings['training_epochs'] = 300 #300
settings['training_interations'] = 30000 #300
settings['hidden_layers_sizes'] = [200, 200]
settings['corruption_levels'] = [0.25, 0.25]
settings['number_of_training'] = [10000]#[1000, 2500, 5000, 7500, 10000]
settings['test_set_from_test'] = True
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_handwritten_digits' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[3]:
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
X_train,y_train = train_set
X_valid,y_valid = valid_set
X_total=np.vstack((X_train, X_valid))
X_total = np.array(X_total, dtype= theano.config.floatX)
print 'sample size', X_total.shape
y_total = np.concatenate([y_train, y_valid])
# In[5]:
################## generate data from training set###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole
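# Descriptive note (added comment): each sample pairs two randomly drawn
# MNIST images A and B; the pair is labelled positive (1) when
# digit(A) - digit(B) == 1 and negative (0) otherwise, and the negative set
# is truncated so that both classes are balanced.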
# In[7]:
#pylab.imshow(imageB.reshape(28, 28), cmap="Greys")
# In[8]:
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
writer.writerow(['no.', 'number_of_training', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
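# Descriptive note (added comment): each row appended by run_models below has
# the layout (subset_no, number_of_training, method_name, isTest,
# <performance scores...>), which lines up with the header written above.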
def run_models(settings = None):
analysis_scr = []
with_auc_score = settings['with_auc_score']
for subset_no in xrange(1,settings['number_iterations']+1):
print("Subset:", subset_no)
################## generate data ###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole.shape
x_train_pre_validation, x_test, y_train_pre_validation, y_test = train_test_split(X_whole,y_whole, test_size=0.2, random_state=211)
for number_of_training in settings['number_of_training']:
x_train, x_validation, y_train, y_validation = train_test_split(x_train_pre_validation[:number_of_training],
y_train_pre_validation[:number_of_training],\
test_size=0.2, random_state=21)
print x_train.shape, y_train.shape, x_validation.shape, y_validation.shape, x_test.shape, y_test.shape
x_train_minmax, x_validation_minmax, x_test_minmax = x_train, x_validation, x_test
train_X_reduced = x_train
train_y_reduced = y_train
test_X = x_test
test_y = y_test
###original data###
################ end of data ####################
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
if settings['SVM']:
print "SVM"
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, y_train)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, y_train)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['Log']:
print "Log"
log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2')
log_clf_l2.fit(scaled_train_X, train_y_reduced)
predicted_test_y = log_clf_l2.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = log_clf_l2.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes = settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train,
x_validation_minmax, y_validation,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
test_predicted = sda.predict(x_test_minmax)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
training_predicted = sda.predict(x_train_minmax)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
####transformed original data####
x = train_X_reduced
a_MAE_original = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_original.transform(train_X_reduced)
new_x_test_minmax_A = a_MAE_original.transform(x_test_minmax)
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
if settings['SAE_SVM']:
# SAE_SVM
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
# SAE_SVM
print 'SAE followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_POLY']:
# SAE_SVM
print 'SAE followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
#### separated transformed data ####
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_whole)
new_x_train_minmax_whole_scaled = standard_scaler.transform(new_x_train_minmax_whole)
new_x_test_minmax_whole_scaled = standard_scaler.transform(new_x_test_minmax_whole)
if settings['DL_S']:
# deep learning using split network
sda_transformed = trainSda(new_x_train_minmax_whole, y_train,
new_x_validationt_minmax_whole, y_validation ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
predicted_test_y = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'DL_S', isTest) + tuple(performance_score(y_test, predicted_test_y, with_auc_score).values()))
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
if settings['SAE_S_SVM']:
print 'SAE_S followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_S_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SAE_S_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_RBF']:
print 'SAE S followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_S_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_POLY']:
# SAE_SVM
print 'SAE S followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
report_name = 'DL_handwritten_digits' + '_size_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + '_' + str(settings['pretraining_interations']) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
return sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
# In[9]:
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr = run_models(settings)
# In[48]:
# save objects
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+'_'+'sda.pickle', 'wb') as handle:
pickle.dump(sda, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+'_'+'a_MAE_original.pickle', 'wb') as handle:
pickle.dump(a_MAE_original, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+'_'+'a_MAE_A.pickle', 'wb') as handle:
pickle.dump(a_MAE_A, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+'_'+'a_MAE_B.pickle', 'wb') as handle:
pickle.dump(a_MAE_B, handle)
x = list(logger.handlers)
for i in x:
logger.removeHandler(i)
i.flush()
i.close()
# In[ ]:
# In[31]:
'''
weights_map_to_input_space = []
StackedNNobject = sda
image_dimension_x = 28*2
image_dimension_y = 28
if isinstance(StackedNNobject, SdA) or isinstance(StackedNNobject, MultipleAEs):
weights_product = StackedNNobject.dA_layers[0].W.get_value(borrow=True)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_0_layer_weights.png'
image.save(sample_image_path)
weights_map_to_input_space.append(weights_product)
for i_layer in range(1, len(StackedNNobject.dA_layers)):
i_weigths = StackedNNobject.dA_layers[i_layer].W.get_value(borrow=True)
weights_product = np.dot(weights_product, i_weigths)
weights_map_to_input_space.append(weights_product)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_'+ str(i_layer)+ '_layer_weights.png'
image.save(sample_image_path)
'''
# In[18]:
| gpl-2.0 |
AaronWatters/inferelator_ng | inferelator_ng/tests/test_design_response.py | 1 | 10286 | import unittest, os
import pandas as pd
import numpy as np
import pdb
from .. import design_response_translation
from .. import utils
my_dir = os.path.dirname(__file__)
class TestDR(unittest.TestCase):
"""
Superclass for common methods
"""
def calculate_design_and_response(self):
#drd = design_response_R.DRDriver()
drd = design_response_translation.PythonDRDriver()
target = drd.target_directory = os.path.join(my_dir, "artifacts")
if not os.path.exists(target):
os.makedirs(target)
drd.delTmin = self.delT_min
drd.delTmax = self.delT_max
drd.tau = self.tau
(self.design, self.response) = drd.run(self.exp, self.meta)
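# Descriptive note (added comment): the expected response used throughout
# these tests is the first-order approximation
# response_t = expr_t + tau * (expr_{t+delT} - expr_t) / delT,
# where time steps with delT > delTmax are dropped, steps with delT < delTmin
# are merged with the following step, and steady-state conditions keep
# design == response.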
class TestSpecialCharacter(TestDR):
def setUp(self):
spchrs='~!@#$%^&*()_-+=|\}]{[;:/?.><\\'
self.meta = pd.DataFrame()
self.meta['isTs']=[True, True, True, True, False]
self.meta['is1stLast'] = ['f','m','m','l','e']
self.meta['prevCol'] = ['NA','ts1'+spchrs,'ts2'+spchrs,'ts3'+spchrs, 'NA']
self.meta['del.t'] = ['NA', 3, 2, 5, 'NA']
self.meta['condName'] = ['ts1'+spchrs,'ts2'+spchrs,'ts3'+spchrs,'ts4'+spchrs,'ss']
self.exp = pd.DataFrame(np.reshape(range(10), (2,5)) + 1,
index = ['gene' + str(i + 1) + spchrs for i in range(2)],
columns = ['ts' + str(i + 1) + spchrs for i in range(4)] + ['ss'])
self.delT_min = 2
self.delT_max = 4
self.tau = 2
self.calculate_design_and_response()
def testspecialcharacter(self):
spchrs='~!@#$%^&*()_-+=|\}]{[;:/?.><\\'
ds, resp = (self.design, self.response)
expression_1 = np.array(list(self.exp['ts1' + spchrs]))
expression_2 = np.array(list(self.exp['ts2' + spchrs]))
expected_response_1 = (expression_1 + self.tau * (expression_2 - expression_1) / (float(self.meta['del.t'][1])))
expression_3 = np.array(list(self.exp['ts3' + spchrs]))
expected_response_2 = expression_2 + self.tau * (expression_3 - expression_2) / (float(self.meta['del.t'][2]))
np.testing.assert_almost_equal(np.array(resp['ts1' + spchrs]), expected_response_1)
np.testing.assert_almost_equal(np.array(resp['ts2' + spchrs]), expected_response_2)
class TestDRModelOrganisms(TestDR):
def test_on_bsubtilis(self):
self.exp = utils.df_from_tsv('data/bsubtilis/expression.tsv')
self.meta = utils.df_from_tsv('data/bsubtilis/meta_data.tsv', has_index=False)
expected_design = utils.df_from_tsv('data/bsubtilis/bsubtilis_design_matrix.tsv')
expected_response = utils.df_from_tsv('data/bsubtilis/bsubtilis_response_matrix.tsv')
self.delT_min = 0
self.delT_max = 110
self.tau = 45
self.calculate_design_and_response()
np.testing.assert_allclose(self.response.values, expected_response.values, atol=1e-15)
self.assertEqual(len(set(expected_response.columns)), len(set(self.response.columns)))
self.assertEqual(expected_response.columns.tolist(), self.response.columns.tolist())
self.assertEqual(expected_response.index.tolist(), self.response.index.tolist())
self.assertTrue(pd.DataFrame.equals(expected_design, self.design))
class TestDRAboveDeltMax(TestDR):
def setUp(self):
self.meta = pd.DataFrame()
self.meta['isTs']=[True, True, True, True, False]
self.meta['is1stLast'] = ['f','m','m','l','e']
self.meta['prevCol'] = ['NA','ts1','ts2','ts3', 'NA']
self.meta['del.t'] = ['NA', 3, 2, 5, 'NA']
self.meta['condName'] = ['ts1','ts2','ts3','ts4','ss']
self.exp = pd.DataFrame(np.reshape(range(10), (2,5)) + 1,
index = ['gene' + str(i + 1) for i in range(2)],
columns = ['ts' + str(i + 1) for i in range(4)] + ['ss'])
self.delT_min = 2
self.delT_max = 4
self.tau = 2
self.calculate_design_and_response()
def test_design_matrix_above_delt_max(self):
# Set up variables
ds, resp = (self.design, self.response)
self.assertEqual(ds.shape, (2, 4))
self.assertEqual(list(ds.columns), ['ts4', 'ss', 'ts1', 'ts2'],
msg = "Guarantee that the ts3 condition is dropped, "
"since its delT of 5 is greater than delt_max of 4")
for col in ds:
self.assertEqual(list(ds[col]), list(self.exp[col]),
msg = ('{} column in the design matrix should be equal '
'to that column in the expression matrix').format(col))
self.assertEqual(list(ds['ss']), [5, 10])
self.assertEqual(list(ds['ss']), list(resp['ss']),
msg = 'Steady State design and response should be equal')
self.assertTrue((resp['ts2'].values == [3, 8]).all())
def test_response_matrix_steady_state_above_delt_max(self):
ds, resp = (self.design, self.response)
self.assertEqual(list(resp.columns), ['ts4', 'ss', 'ts1', 'ts2'])
self.assertEqual(list(resp['ts4']), list(self.exp['ts4']))
self.assertEqual(list(resp['ss']), list(self.exp['ss']))
def test_response_matrix_time_series_above_delt_max(self):
ds, resp = (self.design, self.response)
expression_1 = np.array(list(self.exp['ts1']))
expression_2 = np.array(list(self.exp['ts2']))
expected_response_1 = (expression_1 + self.tau * (expression_2 - expression_1) / (
float(self.meta['del.t'][1])))
expression_3 = np.array(list(self.exp['ts3']))
expected_response_2 = expression_2 + self.tau * (expression_3 - expression_2) / (
float(self.meta['del.t'][2]))
np.testing.assert_almost_equal(np.array(resp['ts1']), expected_response_1)
np.testing.assert_almost_equal(np.array(resp['ts2']), expected_response_2)
class TestDRMicro(TestDR):
def setUp(self):
self.meta = pd.DataFrame()
self.meta['isTs']=[False, False]
self.meta['is1stLast'] = ['e','e']
self.meta['prevCol'] = ['NA','NA']
self.meta['del.t'] = ['NA', 'NA']
self.meta['condName'] = ['ss1', 'ss2']
self.exp = pd.DataFrame(
np.reshape(range(4), (2, 2)) + 1,
index=['gene' + str(i + 1) for i in range(2)],
columns=['ss1', 'ss2'])
self.delT_min = 2
self.delT_max = 4
self.tau = 2
self.calculate_design_and_response()
def test_micro(self):
ds, resp = (self.design, self.response)
self.assertEqual(ds.shape, (2, 2))
self.assertTrue((ds['ss1'].values == [1, 3]).all())
self.assertTrue((ds['ss2'].values == [2, 4]).all())
# In steady state, expect design and response to be identical
self.assertTrue(ds.equals(resp))
class TestDRBelowDeltMin(TestDR):
def setUp(self):
self.meta = pd.DataFrame()
self.meta['isTs']=[True, True, True, True, False]
self.meta['is1stLast'] = ['f','m','m','l','e']
self.meta['prevCol'] = ['NA','ts1','ts2','ts3', 'NA']
self.meta['del.t'] = ['NA', 1, 2, 3, 'NA']
self.meta['condName'] = ['ts1', 'ts2', 'ts3', 'ts4', 'ss']
self.exp = pd.DataFrame(
np.reshape(range(10), (2, 5)) + 1,
index=['gene' + str(i + 1) for i in range(2)],
columns=['ts' + str(i + 1) for i in range(4)] + ['ss'])
self.delT_min = 2
self.delT_max = 4
self.tau = 2
self.calculate_design_and_response()
def test_response_matrix_below_delt_min(self):
ds, resp = (self.design, self.response)
expression_1 = np.array(list(self.exp['ts1']))
expression_3 = np.array(list(self.exp['ts3']))
expected_response_1 = expression_1 + self.tau * (expression_3 - expression_1) / (float(self.meta['del.t'][1]) + float(self.meta['del.t'][2]))
np.testing.assert_almost_equal(np.array(resp['ts1']), expected_response_1)
#pdb.set_trace()
@unittest.skip("skipping until we've determined if we want to modify the legacy R code")
def test_design_matrix_headers_below_delt_min(self):
ds, resp = (self.design, self.response)
print(ds.columns)
self.assertEqual(list(ds.columns), ['ss', 'ts1', 'ts2', 'ts3'],
msg = "Guarantee that the ts4 condition is dropped, since its the last in the time series")
class TestBranchingTimeSeries(TestDR):
def setUp(self):
self.meta = pd.DataFrame()
self.meta['isTs']=[True, True, True]
self.meta['is1stLast'] = ['f','l','l']
self.meta['prevCol'] = ['NA','ts1','ts1']
self.meta['del.t'] = ['NA', 2, 2]
self.meta['condName'] = ['ts1','ts2','ts3']
self.exp = pd.DataFrame(np.reshape(range(9), (3,3)) + 1,
index = ['gene' + str(i + 1) for i in range(3)],
columns = ['ts' + str(i + 1) for i in range(3)])
self.delT_min = 1
self.delT_max = 4
self.tau = 1
self.calculate_design_and_response()
def test_design_matrix_branching_time_series(self):
ds, resp = (self.design, self.response)
self.assertEqual(ds.shape, (3, 2))
self.assertEqual(list(ds.columns), ['ts1_dupl01', 'ts1_dupl02'],
msg = 'This is how the R code happens to name branching time series')
for col in ds:
self.assertEqual(list(ds[col]), list(self.exp['ts1']),
msg = '{} column in the design matrix should be equal to the branching source, ts1, in the exp matrix'.format(col))
def test_response_matrix_branching_time_series(self):
ds, resp = (self.design, self.response)
self.assertEqual(resp.shape, (3, 2))
expression_1 = np.array(list(self.exp['ts1']))
expression_2 = np.array(list(self.exp['ts2']))
expected_response_1 = (expression_1 + self.tau * (expression_2 - expression_1) /
float(self.meta['del.t'][1]))
expression_3 = np.array(list(self.exp['ts3']))
expected_response_2 = (expression_1 + self.tau * (expression_3 - expression_1) /
float(self.meta['del.t'][2]))
np.testing.assert_almost_equal(np.array(resp['ts1_dupl01']), expected_response_1)
np.testing.assert_almost_equal(np.array(resp['ts1_dupl02']), expected_response_2)
| bsd-2-clause |
tpoy0099/option_calculator | gui_impl/mw_calculator.py | 1 | 12599 | #coding=utf8
import threading as THD
import datetime as DT
import matplotlib.pyplot as PLT
from gui_impl.qt_mvc_impl import MatrixModel, AutoFormDelegate
from gui_impl.position_editor import PosEditor
from gui_impl.qtableview_utility import getSelectedRows
from engine_algorithm.calculate_engine import Engine
from utility.messager import MessageQueue
from utility.self_defined_types import MessageTypes, XAxisType
from engine_algorithm.database_adaptor import OPTION_DF_HEADERS
from engine_algorithm.database_adaptor import STOCK_DF_HEADERS
from engine_algorithm.database_adaptor import PORTFOLIO_DF_HEADERS
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#ui created by qt designer
from qt_ui.ui_main_window import Ui_MainWindow
#############################################################################
def decorateAndPlot(sp, x_ls, y_ls, title=None, central_x=0):
sp.plot(x_ls, y_ls, color="green", linewidth=2, linestyle="-")
sp.plot([central_x,central_x], sp.get_ylim(), color="blue", linewidth=0.5, linestyle="--")
if title:
sp.set_title(title)
sp.grid(True)
return
def plotZeroLine(sp):
sp.plot(sp.get_xlim(), [0,0], color="blue", linewidth=0.5, linestyle="-")
#############################################################################
class OptionCalculator(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(OptionCalculator, self).__init__(parent)
self.setupUi(self)
#show
self.show()
#define signal&slot
self.connect(self.fresh_quotes_button, SIGNAL('clicked()'), self.__onRefreshQuoteBtClicked)
self.connect(self.edit_position_button, SIGNAL('clicked()'), self.__onEditPosBtClicked)
self.connect(self.plot_button, SIGNAL('clicked()'), self.__onPlotBtClicked)
self.connect(self, SIGNAL('PLOT_SENSIBILITY'), self.__plotGreeksSensibility)
self.connect(self, SIGNAL('PLOT_EXERCISE_CURVE'), self.__plotExerciseCurve)
self.connect(self, SIGNAL('SET_ETF_DISPLAY'), self.__setEtfDataDisplay)
self.connect(self, SIGNAL('SET_CENTRAL_DISPLAY'), self.__setCentralTableDisplay)
self.connect(self, SIGNAL('ENGINE_ERROR'), self.__notifyErrorOccur)
#init position vtable
self.option_data = MatrixModel(self)
self.pos_deleg = AutoFormDelegate(self)
self.position_vtable.setItemDelegate(self.pos_deleg)
self.position_vtable.setModel(self.option_data)
self.option_data.setSize(0, OPTION_DF_HEADERS)
#init stock_position vtable
self.stock_data = MatrixModel(self)
self.stpos_deleg = AutoFormDelegate(self)
self.stock_position_vtable.setItemDelegate(self.stpos_deleg)
self.stock_position_vtable.setModel(self.stock_data)
self.stock_data.setSize(0, STOCK_DF_HEADERS)
#init portfolio vtable
self.portfolio_data = MatrixModel(self)
self.ptf_deleg = AutoFormDelegate(self)
self.portfolio_vtable.setItemDelegate(self.ptf_deleg)
self.portfolio_vtable.setModel(self.portfolio_data)
self.portfolio_data.setSize(0, PORTFOLIO_DF_HEADERS)
#gui communication
self.msg = None
self.msg_event = None
self.msg_thread = None
#data engine
self.engine = None
#flow control
self.is_updating = False
self.auto_refresh_timer = None
#qt child
self.edit_dialog = PosEditor(self)
self.edit_dialog.setControler(self)
return
def start(self):
#gui communication
self.msg = MessageQueue()
self.msg_event = THD.Event()
self.msg_thread = THD.Thread(target=self.__handleMessage)
self.msg_thread.start()
#data engine
self.engine = Engine(self)
self.__startAutoRefresh(True)
self.engine.qryEtfQuoteFeed()
self.engine.qryTableDataFeed()
def quit(self):
if not self.auto_refresh_timer is None:
self.auto_refresh_timer.cancel()
if not self.engine is None:
self.engine.quit()
if not self.msg_thread is None:
self.__pushMsg(MessageTypes.QUIT)
self.msg_thread.join()
return
def closeEvent(self, event):
rtn = QMessageBox.question(self, 'Save & exit', 'Save positions to csv?',
QMessageBox.Save, QMessageBox.No, QMessageBox.Cancel)
if rtn == QMessageBox.Cancel:
event.ignore()
return
if rtn == QMessageBox.Save:
self.onSavePosition2Csv()
self.quit()
#close
super(OptionCalculator, self).closeEvent(event)
return
#-------------------------------------------------------------------------
def onEngineError(self, err):
with open(r'./logs.txt', 'a') as fid:
err_info = '\n>>%s\n%s' % (str(DT.datetime.now()), str(err))
fid.write(err_info)
self.emit(SIGNAL('ENGINE_ERROR'))
return
def onRepTableFeed(self, option_data, stock_data, ptf_data):
self.__pushMsg(MessageTypes.REPLY_TABLE_FEED, (option_data, stock_data, ptf_data))
def onRepEtfQuoteFeed(self, etf_data):
self.__pushMsg(MessageTypes.REPLY_ETF_QUOTE_FEED, etf_data)
def onRepCalGreeksSensibility(self, plot_data, x_axis_type):
self.__pushMsg(MessageTypes.REPLY_CAL_SENSI, (plot_data, x_axis_type))
def onRepCalExerciseCurve(self, plot_data):
self.__pushMsg(MessageTypes.REPLY_EXERCISE_CURVE, plot_data)
def onRepPositionBasedataFeed(self, positions):
self.__pushMsg(MessageTypes.REPLY_POSITION_BASEDATA_FEED, positions)
def __handleMessage(self):
while True:
msg = self.msg.getMsg()
if msg is None:
self.msg_event.wait()
self.msg_event.clear()
#received data for display
elif msg.type is MessageTypes.REPLY_TABLE_FEED:
self.emit(SIGNAL('SET_CENTRAL_DISPLAY'),
msg.content[0], msg.content[1], msg.content[2])
elif msg.type is MessageTypes.REPLY_ETF_QUOTE_FEED:
self.emit(SIGNAL('SET_ETF_DISPLAY'), msg.content)
elif msg.type is MessageTypes.REPLY_CAL_SENSI:
self.emit(SIGNAL('PLOT_SENSIBILITY'), msg.content[0], msg.content[1])
elif msg.type is MessageTypes.REPLY_EXERCISE_CURVE:
self.emit(SIGNAL('PLOT_EXERCISE_CURVE'), msg.content)
elif msg.type is MessageTypes.REPLY_POSITION_BASEDATA_FEED:
self.__updatePosEditorData(msg.content)
elif msg.type is MessageTypes.QUIT:
break
return
def __pushMsg(self, msg_type, content=None):
self.msg.pushMsg(msg_type, content)
self.msg_event.set()
#----------------------------------------------------------------------
def onEditorClickBtSaveAll(self, position_data):
self.engine.qryReloadPositions(position_data)
self.__queryUpdateData()
def onEditorClickBtReloadPosition(self):
self.engine.qryReloadPositions()
self.engine.qryPositionBasedata()
self.__queryUpdateData()
def onSavePosition2Csv(self):
self.engine.qrySavePositionCsv()
#----------------------------------------------------------------------
def __onRefreshQuoteBtClicked(self):
self.__queryUpdateData()
def __onEditPosBtClicked(self):
self.edit_dialog.wakeupEditor()
self.engine.qryPositionBasedata()
def __onPlotBtClicked(self):
x_axis_type = self.greeks_x_axis_combobox.currentIndex()
#pass group id list
if self.portfolio_checkBox.isChecked():
#collect group id
group_ids = list()
for r in getSelectedRows(self.portfolio_vtable):
item = self.portfolio_data.getValueByHeader(r, 'group')
try:
group_ids.append(int(item))
except:
pass
if group_ids:
if x_axis_type == 0:
self.engine.qryCalGreeksSensibilityByGroup(group_ids, group_ids, XAxisType.PRICE)
elif x_axis_type == 1:
self.engine.qryCalGreeksSensibilityByGroup(group_ids, group_ids, XAxisType.VOLATILITY)
elif x_axis_type == 2:
self.engine.qryCalGreeksSensibilityByGroup(group_ids, group_ids, XAxisType.TIME)
elif x_axis_type == 3:
self.engine.qryExerciseCurveByGroup(group_ids, group_ids)
#pass row numbers list
else:
option_idx = getSelectedRows(self.position_vtable)
stock_idx = getSelectedRows(self.stock_position_vtable)
if option_idx or stock_idx:
if x_axis_type == 0:
self.engine.qryCalGreeksSensibilityByPosition(option_idx, stock_idx, XAxisType.PRICE)
elif x_axis_type == 1:
self.engine.qryCalGreeksSensibilityByPosition(option_idx, stock_idx, XAxisType.VOLATILITY)
elif x_axis_type == 2:
self.engine.qryCalGreeksSensibilityByPosition(option_idx, stock_idx, XAxisType.TIME)
elif x_axis_type == 3:
self.engine.qryExerciseCurveByPosition(option_idx, stock_idx)
return
#-------------------------------------------------------------------
def __startAutoRefresh(self, onInit=False):
self.auto_refresh_timer = THD.Timer(300, self.__startAutoRefresh)
self.auto_refresh_timer.start()
if not onInit:
self.__queryUpdateData()
return
def __queryUpdateData(self):
if not self.is_updating:
self.is_updating = True
self.engine.qryUpdateData()
self.engine.qryEtfQuoteFeed()
self.engine.qryTableDataFeed()
def __setEtfDataDisplay(self, etf_data):
self.update_time_label.setText('%s' % etf_data.getByHeader(0, 'update_time').strftime(r'%H:%M:%S'))
self.etf_openprice_label.setText('open: %.3f' % etf_data.getByHeader(0, 'open_price'))
self.etf_highprice_label.setText('high: %.3f' % etf_data.getByHeader(0, 'high_price'))
self.etf_lowprice_label.setText('low: %.3f' % etf_data.getByHeader(0, 'low_price'))
self.etf_lastprice_label.setText('last: %.3f' % etf_data.getByHeader(0, 'last_price'))
def __setCentralTableDisplay(self, option_data, stock_data, portfolio_data):
self.option_data.setTableContent(option_data)
self.stock_data.setTableContent(stock_data)
self.portfolio_data.setTableContent(portfolio_data)
#notify_updating_completed
self.is_updating = False
return
def __updatePosEditorData(self, pos_table_handler):
self.edit_dialog.setEditTableContent(pos_table_handler)
def __plotGreeksSensibility(self, p_data, x_axis_type):
if x_axis_type == XAxisType.PRICE:
figure_name = 'by price'
elif x_axis_type == XAxisType.VOLATILITY:
figure_name = 'by volatility'
elif x_axis_type == XAxisType.TIME:
figure_name = 'by time'
else:
figure_name = ''
fig = PLT.figure()
fig.suptitle(figure_name)
sp = fig.add_subplot(2, 2, 1)
decorateAndPlot(sp, p_data['ax_x'], p_data['delta'],
title='delta', central_x=p_data['central_x'])
sp = fig.add_subplot(2, 2, 2)
decorateAndPlot(sp, p_data['ax_x'], p_data['gamma'],
title='gamma', central_x=p_data['central_x'])
sp = fig.add_subplot(2, 2, 3)
decorateAndPlot(sp, p_data['ax_x'], p_data['vega'],
title='vega', central_x=p_data['central_x'])
sp = fig.add_subplot(2, 2, 4)
decorateAndPlot(sp, p_data['ax_x'], p_data['theta'],
title='theta', central_x=p_data['central_x'])
PLT.show()
return
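    # Added illustration (not part of the original class): the figure built above
    # is a plain 2x2 matplotlib grid; decorateAndPlot is a project helper defined
    # elsewhere in this module. A minimal standalone sketch of the same layout,
    # assuming only numpy/matplotlib and fake data, would be:
    #     import numpy as np
    #     import matplotlib.pyplot as plt
    #     x = np.linspace(2.0, 3.0, 50)
    #     fig, axes = plt.subplots(2, 2)
    #     for ax, name in zip(axes.ravel(), ('delta', 'gamma', 'vega', 'theta')):
    #         ax.plot(x, np.random.randn(x.size))
    #         ax.set_title(name)
    #     fig.suptitle('by price')
    #     plt.show()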
def __plotExerciseCurve(self, plot_data):
fig = PLT.figure()
fig.suptitle('Theoretical earnings curve')
sp = fig.add_subplot(1, 1, 1)
decorateAndPlot(sp, plot_data['ax_x'], plot_data['exercise_profit'],
central_x=plot_data['central_x'])
plotZeroLine(sp)
PLT.show()
return
def __notifyErrorOccur(self):
        QMessageBox.question(self, 'Error',
                             'An engine error occurred, please restart manually ...',
                             QMessageBox.Yes)
| gpl-2.0 |
LohithBlaze/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
mrgloom/HPOlib | HPOlib/Plotting/plotOptimizerOverhead.py | 5 | 8458 | #!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import cPickle
import itertools
import sys
import matplotlib.pyplot as plt
import matplotlib.gridspec
import numpy as np
from HPOlib.Plotting import plot_util
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
def plot_time_trace(time_dict, name_list, title="", log=True, save="", y_max=0, y_min=0):
colors = plot_util.get_plot_colors()
markers = plot_util.get_plot_markers()
linestyles = itertools.cycle(['-'])
size = 5
ratio = 5
gs = matplotlib.gridspec.GridSpec(ratio, 1)
fig = plt.figure(1, dpi=100)
fig.suptitle(title, fontsize=16)
ax1 = plt.subplot(gs[0:ratio, :])
ax1.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
min_val = sys.maxint
max_val = -sys.maxint
max_trials = 0
trial_list_means = list()
trial_list_std = list()
num_runs_list = list()
# Get mean and std for all times and optimizers
for entry in name_list:
k = entry[0]
trial_list_std.append(np.std(np.array(time_dict[k]), axis=0))
if log:
trial_list_means.append(np.log10(np.mean(np.array(time_dict[k]), axis=0)))
else:
trial_list_means.append(np.mean(np.array(time_dict[k]), axis=0))
num_runs_list.append(len(time_dict[k]))
for k in range(len(name_list)):
# Plot mean and std for optimizer duration
c = colors.next()
m = markers.next()
x = range(len(trial_list_means[k]))
l = linestyles.next()
ax1.fill_between(x, trial_list_means[k] - trial_list_std[k],
trial_list_means[k] + trial_list_std[k],
facecolor=c, alpha=0.3, edgecolor=c)
ax1.plot(x, trial_list_means[k], color=c, linewidth=size, label=name_list[k][0], linestyle=l, marker=m)
# Plot number of func evals for this experiment
if min(trial_list_means[k] - trial_list_std[k]) < min_val:
min_val = min(trial_list_means[k] - trial_list_std[k])
if max(trial_list_means[k] + trial_list_std[k]) > max_val:
max_val = max(trial_list_means[k] + trial_list_std[k])
if len(trial_list_means[k]) > max_trials:
max_trials = len(trial_list_means[k])
# Descript and label the stuff
fig.suptitle(title, fontsize=16)
leg = ax1.legend(loc='best', fancybox=True)
leg.get_frame().set_alpha(0.5)
if log:
ax1.set_ylabel("log10(Optimizer time in [sec])")
else:
ax1.set_ylabel("Optimizer time in [sec]")
if y_max == y_min:
ax1.set_ylim([min_val-2, max_val+2])
else:
ax1.set_ylim([y_min, y_max])
ax1.set_xlim([0, max_trials])
plt.tight_layout()
plt.subplots_adjust(top=0.85)
if save != "":
plt.savefig(save, dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches="tight", pad_inches=0.1)
else:
plt.show()
def main(pkl_list, name_list, autofill, title="", log=False, save="",
y_min=0, y_max=0, cut=sys.maxint):
times_dict = dict()
for exp in range(len(name_list)):
times_dict[name_list[exp][0]] = list()
for pkl in pkl_list[exp]:
fh = open(pkl, "r")
trials = cPickle.load(fh)
fh.close()
# Get all variables from the trials object
cv_starttime = trials["cv_starttime"][:cut]
cv_endtime = trials["cv_endtime"][:cut]
# Get optimizer duration times
time_list = list()
# First duration
time_list.append(cv_starttime[0] - trials["starttime"][0])
time_idx = 0
for i in range(len(cv_starttime[1:])):
# Is there a next restored run?
# if yes, does next cvstart belong to a restored run?
if len(trials["endtime"]) > time_idx and \
cv_starttime[i+1] > trials["endtime"][time_idx]:
# Check whether run crashed/terminated during cv
# Equals means that the run crashed
if cv_endtime[i] < trials["endtime"][time_idx]:
# No .. everything is fine
time_list.append((trials["endtime"][time_idx] - cv_endtime[i]))
time_list.append((cv_starttime[i + 1] - trials["starttime"][time_idx+1]))
elif trials["endtime"][time_idx] == cv_endtime[i]:
# Yes, but BBoM managed to set an endtime
pass
else:
# Yes ... trouble
print "Help"
print trials["endtime"][time_idx]
print cv_endtime[i]
time_idx += 1
# everything ...
else:
time_list.append(cv_starttime[i + 1] - cv_endtime[i])
times_dict[name_list[exp][0]].append(time_list)
for key in times_dict.keys():
max_len = max([len(ls) for ls in times_dict[key]])
for t in range(len(times_dict[key])):
if len(times_dict[key][t]) < max_len and autofill:
diff = max_len - len(times_dict[key][t])
# noinspection PyUnusedLocal
times_dict[key][t] = np.append(times_dict[key][t], [times_dict[key][t][-1] for x in range(diff)])
elif len(times_dict[key][t]) < max_len and not autofill:
raise ValueError("(%s != %s), Traces do not have the same length, please use -a" %
(str(max_len), str(len(times_dict[key][t]))))
plot_time_trace(times_dict, name_list, title=title, log=log, save=save,
y_min=y_min, y_max=y_max)
if save != "":
sys.stdout.write("Saved plot to " + save + "\n")
else:
sys.stdout.write("..Done\n")
if __name__ == "__main__":
prog = "python plotOptimizerOverhead.py WhatIsThis <oneOrMorePickles> [WhatIsThis <oneOrMorePickles>]"
description = "Plot a Trace with std for multiple experiments"
parser = ArgumentParser(description=description, prog=prog)
# General Options
parser.add_argument("-c", "--cut", type=int, default=sys.maxint,
help="Cut the experiment pickle length.")
parser.add_argument("-l", "--log", action="store_true", dest="log",
default=False, help="Plot on log scale")
parser.add_argument("--max", type=float, dest="max",
default=0, help="Maximum of the plot")
parser.add_argument("--min", type=float, dest="min",
default=0, help="Minimum of the plot")
parser.add_argument("-s", "--save", dest="save",
default="", help="Where to save plot instead of showing it?")
parser.add_argument("-t", "--title", dest="title",
default="", help="Choose a supertitle for the plot")
# Options which are available only for this plot
parser.add_argument("-a", "--autofill", action="store_true", dest="autofill",
default=False, help="Fill trace automatically")
args, unknown = parser.parse_known_args()
sys.stdout.write("\nFound " + str(len(unknown)) + " arguments\n")
pkl_list_main, name_list_main = plot_util.get_pkl_and_name_list(unknown)
main(pkl_list_main, name_list_main, autofill=args.autofill, title=args.title,
log=args.log, save=args.save, y_min=args.min, y_max=args.max,
cut=args.cut)
| gpl-3.0 |
rkmaddox/mne-python | mne/fixes.py | 4 | 35959 | """Compatibility fixes for older versions of libraries
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# originally copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD
from distutils.version import LooseVersion
import functools
import inspect
from math import log
import os
from pathlib import Path
import warnings
import numpy as np
###############################################################################
# Misc
def _median_complex(data, axis):
"""Compute marginal median on complex data safely.
Can be removed when numpy introduces a fix.
See: https://github.com/scipy/scipy/pull/12676/.
"""
# np.median must be passed real arrays for the desired result
if np.iscomplexobj(data):
data = (np.median(np.real(data), axis=axis)
+ 1j * np.median(np.imag(data), axis=axis))
else:
data = np.median(data, axis=axis)
return data
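# Usage sketch (illustrative, assumes only NumPy): real and imaginary parts are
# reduced separately, so the result need not be one of the input values.
#     >>> _median_complex(np.array([1 + 1j, 3 + 5j, 2 + 2j]), axis=0)
#     (2+2j)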
# helpers to get function arguments
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
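# Usage sketch (illustrative):
#     >>> def f(a, b=1, *args, **kwargs):
#     ...     pass
#     >>> _get_args(f)
#     ['a', 'b']
#     >>> _get_args(f, varargs=True)
#     (['a', 'b'], ['args'])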
def _safe_svd(A, **kwargs):
"""Wrapper to get around the SVD did not converge error of death"""
# Intel has a bug with their GESVD driver:
# https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501
# For SciPy 0.18 and up, we can work around it by using
# lapack_driver='gesvd' instead.
from scipy import linalg
if kwargs.get('overwrite_a', False):
raise ValueError('Cannot set overwrite_a=True with this function')
try:
return linalg.svd(A, **kwargs)
except np.linalg.LinAlgError as exp:
from .utils import warn
if 'lapack_driver' in _get_args(linalg.svd):
warn('SVD error (%s), attempting to use GESVD instead of GESDD'
% (exp,))
return linalg.svd(A, lapack_driver='gesvd', **kwargs)
else:
raise
def _csc_matrix_cast(x):
from scipy.sparse import csc_matrix
return csc_matrix(x)
###############################################################################
# Backporting nibabel's read_geometry
def _get_read_geometry():
"""Get the geometry reading function."""
try:
import nibabel as nib
has_nibabel = True
except ImportError:
has_nibabel = False
if has_nibabel:
from nibabel.freesurfer import read_geometry
else:
read_geometry = _read_geometry
return read_geometry
def _read_geometry(filepath, read_metadata=False, read_stamp=False):
"""Backport from nibabel."""
from .surface import _fread3, _fread3_many
volume_info = dict()
TRIANGLE_MAGIC = 16777214
QUAD_MAGIC = 16777215
NEW_QUAD_MAGIC = 16777213
with open(filepath, "rb") as fobj:
magic = _fread3(fobj)
if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file
nvert = _fread3(fobj)
nquad = _fread3(fobj)
(fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div
coords = coords.reshape(-1, 3)
quads = _fread3_many(fobj, nquad * 4)
quads = quads.reshape(nquad, 4)
#
# Face splitting follows
#
faces = np.zeros((2 * nquad, 3), dtype=np.int64)
nface = 0
for quad in quads:
if (quad[0] % 2) == 0:
faces[nface] = quad[0], quad[1], quad[3]
nface += 1
faces[nface] = quad[2], quad[3], quad[1]
nface += 1
else:
faces[nface] = quad[0], quad[1], quad[2]
nface += 1
faces[nface] = quad[0], quad[2], quad[3]
nface += 1
elif magic == TRIANGLE_MAGIC: # Triangle file
create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8')
fobj.readline()
vnum = np.fromfile(fobj, ">i4", 1)[0]
fnum = np.fromfile(fobj, ">i4", 1)[0]
coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
if read_metadata:
volume_info = _read_volume_info(fobj)
else:
raise ValueError("File does not appear to be a Freesurfer surface")
coords = coords.astype(np.float64)
ret = (coords, faces)
if read_metadata:
if len(volume_info) == 0:
warnings.warn('No volume information contained in the file')
ret += (volume_info,)
if read_stamp:
ret += (create_stamp,)
return ret
###############################################################################
# Triaging FFT functions to get fast pocketfft (SciPy 1.4)
@functools.lru_cache(None)
def _import_fft(name):
single = False
if not isinstance(name, tuple):
name = (name,)
single = True
try:
from scipy.fft import rfft # noqa analysis:ignore
except ImportError:
from numpy import fft # noqa
else:
from scipy import fft # noqa
out = [getattr(fft, n) for n in name]
if single:
out = out[0]
return out
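# Usage sketch (illustrative): a single name returns one callable, a tuple of
# names returns a list, taken from scipy.fft when available and numpy.fft
# otherwise.
#     rfft = _import_fft('rfft')
#     fft, ifft = _import_fft(('fft', 'ifft'))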
###############################################################################
# NumPy Generator (NumPy 1.17)
def rng_uniform(rng):
"""Get the unform/randint from the rng."""
# prefer Generator.integers, fall back to RandomState.randint
return getattr(rng, 'integers', getattr(rng, 'randint', None))
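# Usage sketch (illustrative): works for both the new Generator API and the
# legacy RandomState API.
#     rng_uniform(np.random.default_rng(0))(0, 10, size=3)        # Generator.integers
#     rng_uniform(np.random.RandomState(0))(0, 10, size=3)        # RandomState.randint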
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
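# Usage sketch (illustrative, assumes SciPy is available):
#     >>> from scipy import signal
#     >>> sos, n_sections = _validate_sos(signal.butter(2, 0.2, output='sos'))
#     >>> n_sections
#     1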
###############################################################################
# Misc utilities
# get_fdata() requires knowing the dtype ahead of time, so let's triage on our
# own instead
def _get_img_fdata(img):
data = np.asanyarray(img.dataobj)
dtype = np.complex128 if np.iscomplexobj(data) else np.float64
return data.astype(dtype)
def _read_volume_info(fobj):
"""An implementation of nibabel.freesurfer.io._read_volume_info, since old
versions of nibabel (<=2.1.0) don't have it.
"""
volume_info = dict()
head = np.fromfile(fobj, '>i4', 1)
if not np.array_equal(head, [20]): # Read two bytes more
head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])
if not np.array_equal(head, [2, 0, 20]):
warnings.warn("Unknown extension code.")
return volume_info
volume_info['head'] = head
for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']:
pair = fobj.readline().decode('utf-8').split('=')
if pair[0].strip() != key or len(pair) != 2:
raise IOError('Error parsing volume info.')
if key in ('valid', 'filename'):
volume_info[key] = pair[1].strip()
elif key == 'volume':
volume_info[key] = np.array(pair[1].split()).astype(int)
else:
volume_info[key] = np.array(pair[1].split()).astype(float)
# Ignore the rest
return volume_info
def _serialize_volume_info(volume_info):
"""An implementation of nibabel.freesurfer.io._serialize_volume_info, since
old versions of nibabel (<=2.1.0) don't have it."""
keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']
diff = set(volume_info.keys()).difference(keys)
if len(diff) > 0:
raise ValueError('Invalid volume info: %s.' % diff.pop())
strings = list()
for key in keys:
if key == 'head':
if not (np.array_equal(volume_info[key], [20]) or np.array_equal(
volume_info[key], [2, 0, 20])):
warnings.warn("Unknown extension code.")
strings.append(np.array(volume_info[key], dtype='>i4').tobytes())
elif key in ('valid', 'filename'):
val = volume_info[key]
strings.append('{} = {}\n'.format(key, val).encode('utf-8'))
elif key == 'volume':
val = volume_info[key]
strings.append('{} = {} {} {}\n'.format(
key, val[0], val[1], val[2]).encode('utf-8'))
else:
val = volume_info[key]
strings.append('{} = {:0.10g} {:0.10g} {:0.10g}\n'.format(
key.ljust(6), val[0], val[1], val[2]).encode('utf-8'))
return b''.join(strings)
##############################################################################
# adapted from scikit-learn
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
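# Usage sketch (illustrative, assumes scikit-learn is installed):
#     >>> from sklearn.linear_model import LinearRegression, LogisticRegression
#     >>> is_classifier(LogisticRegression()), is_regressor(LinearRegression())
#     (True, True)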
_DEFAULT_TAGS = {
'non_deterministic': False,
'requires_positive_X': False,
'requires_positive_y': False,
'X_types': ['2darray'],
'poor_score': False,
'no_validation': False,
'multioutput': False,
"allow_nan": False,
'stateless': False,
'multilabel': False,
'_skip_test': False,
'_xfail_checks': False,
'multioutput_only': False,
'binary_only': False,
'requires_fit': True,
'preserves_dtype': [np.float64],
'requires_y': False,
'pairwise': False,
}
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn.
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : bool, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Parameters
----------
**params : dict
Parameters.
Returns
-------
inst : instance
The object.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
from sklearn.base import _pprint
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
# __getstate__ and __setstate__ are omitted because they only contain
# conditionals that are not satisfied by our objects (e.g.,
# ``if type(self).__module__.startswith('sklearn.')``.
def _more_tags(self):
return _DEFAULT_TAGS
def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, '_more_tags'):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
# newer sklearn deprecates importing from sklearn.metrics.scoring,
# but older sklearn does not expose check_scoring in sklearn.metrics.
def _get_check_scoring():
try:
from sklearn.metrics import check_scoring # noqa
except ImportError:
from sklearn.metrics.scorer import check_scoring # noqa
return check_scoring
def _check_fit_params(X, fit_params, indices=None):
"""Check and validate the parameters passed during `fit`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data array.
fit_params : dict
Dictionary containing the parameters passed at fit.
indices : array-like of shape (n_samples,), default=None
Indices to be selected if the parameter has the same size as
`X`.
Returns
-------
fit_params_validated : dict
Validated parameters. We ensure that the values support
indexing.
"""
try:
from sklearn.utils.validation import \
_check_fit_params as _sklearn_check_fit_params
return _sklearn_check_fit_params(X, fit_params, indices)
except ImportError:
from sklearn.model_selection import _validation
fit_params_validated = \
{k: _validation._index_param_value(X, v, indices)
for k, v in fit_params.items()}
return fit_params_validated
###############################################################################
# Copied from sklearn to simplify code paths
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
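# Usage sketch (illustrative): with assume_centered=False (the default) this
# matches np.cov(X.T, bias=1).
#     >>> X = np.random.RandomState(0).randn(100, 3)
#     >>> empirical_covariance(X).shape
#     (3, 3)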
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
from scipy import linalg
# covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like,
The precision matrix associated to the current covariance object.
"""
from scipy import linalg
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fit the Maximum Likelihood Estimator covariance model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : ndarray | None
Not used, present for API consistency.
Returns
-------
self : object
Returns self.
""" # noqa: E501
# X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Compute the log-likelihood of a Gaussian dataset.
Uses ``self.covariance_`` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution than
the data used in fit (including centering).
y : ndarray | None
Not used, present for API consistency.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
from scipy import linalg
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, observations):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
observations : array-like, shape = [n_observations, n_features]
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
-------
mahalanobis_distance : array, shape = [n_observations,]
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
centered_obs = observations - self.location_
mahalanobis_dist = np.sum(
np.dot(centered_obs, precision) * centered_obs, 1)
return mahalanobis_dist
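# Usage sketch for the estimator above (illustrative):
#     >>> X = np.random.RandomState(0).randn(200, 3)
#     >>> cov = EmpiricalCovariance().fit(X)
#     >>> cov.covariance_.shape, cov.mahalanobis(X[:2]).shape
#     ((3, 3), (2,))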
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + _logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
# sklearn uses np.linalg for this, but ours is more robust to zero eigenvalues
def _logdet(A):
"""Compute the log det of a positive semidefinite matrix."""
from scipy import linalg
vals = linalg.eigvalsh(A)
# avoid negative (numerical errors) or zero (semi-definite matrix) values
tol = vals.max() * vals.size * np.finfo(np.float64).eps
vals = np.where(vals > tol, vals, tol)
return np.sum(np.log(vals))
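# Worked example (illustrative): for the identity matrix all eigenvalues are 1,
# so the log-determinant is 0.
#     >>> _logdet(np.eye(3))
#     0.0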
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
def _assess_dimension_(spectrum, rank, n_samples, n_features):
from scipy.special import gammaln
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.) -
log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def svd_flip(u, v, u_based_decision=True):
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
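# Usage sketch (illustrative): the sign convention changes u and v together, so
# the reconstructed product is unchanged.
#     >>> A = np.random.RandomState(0).randn(4, 3)
#     >>> u, s, vt = np.linalg.svd(A, full_matrices=False)
#     >>> u, vt = svd_flip(u, vt)
#     >>> np.allclose((u * s) @ vt, A)
#     True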
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
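# Usage sketch (illustrative): identical to np.cumsum for well-behaved input,
# but warns if the final element drifts away from the true sum.
#     >>> stable_cumsum(np.ones(5))
#     array([1., 2., 3., 4., 5.])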
# This shim can be removed once NumPy 1.19.0+ is required (1.18.4 has sign bug)
def svd(a, hermitian=False):
if hermitian: # faster
s, u = np.linalg.eigh(a)
sgn = np.sign(s)
s = np.abs(s)
sidx = np.argsort(s)[..., ::-1]
sgn = np.take_along_axis(sgn, sidx, axis=-1)
s = np.take_along_axis(s, sidx, axis=-1)
u = np.take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = (u * sgn[..., np.newaxis, :]).swapaxes(-2, -1).conj()
np.abs(s, out=s)
return u, s, vt
else:
return np.linalg.svd(a)
###############################################################################
# From nilearn
def _crop_colorbar(cbar, cbar_vmin, cbar_vmax):
"""
crop a colorbar to show from cbar_vmin to cbar_vmax
Used when symmetric_cbar=False is used.
"""
import matplotlib
if (cbar_vmin is None) and (cbar_vmax is None):
return
cbar_tick_locs = cbar.locator.locs
if cbar_vmax is None:
cbar_vmax = cbar_tick_locs.max()
if cbar_vmin is None:
cbar_vmin = cbar_tick_locs.min()
new_tick_locs = np.linspace(cbar_vmin, cbar_vmax,
len(cbar_tick_locs))
# matplotlib >= 3.2.0 no longer normalizes axes between 0 and 1
# See https://matplotlib.org/3.2.1/api/prev_api_changes/api_changes_3.2.0.html
# _outline was removed in
# https://github.com/matplotlib/matplotlib/commit/03a542e875eba091a027046d5ec652daa8be6863
# so we use the code from there
if LooseVersion(matplotlib.__version__) >= LooseVersion("3.2.0"):
cbar.ax.set_ylim(cbar_vmin, cbar_vmax)
X, _ = cbar._mesh()
X = np.array([X[0], X[-1]])
Y = np.array([[cbar_vmin, cbar_vmin], [cbar_vmax, cbar_vmax]])
N = X.shape[0]
ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0]
x = X.T.reshape(-1)[ii]
y = Y.T.reshape(-1)[ii]
xy = (np.column_stack([y, x])
if cbar.orientation == 'horizontal' else
np.column_stack([x, y]))
cbar.outline.set_xy(xy)
else:
cbar.ax.set_ylim(cbar.norm(cbar_vmin), cbar.norm(cbar_vmax))
outline = cbar.outline.get_xy()
outline[:2, 1] += cbar.norm(cbar_vmin)
outline[2:6, 1] -= (1. - cbar.norm(cbar_vmax))
outline[6:, 1] += cbar.norm(cbar_vmin)
cbar.outline.set_xy(outline)
cbar.set_ticks(new_tick_locs, update_ticks=True)
###############################################################################
# Numba (optional requirement)
# Here we choose different defaults to speed things up by default
try:
import numba
if LooseVersion(numba.__version__) < LooseVersion('0.40'):
raise ImportError
prange = numba.prange
def jit(nopython=True, nogil=True, fastmath=True, cache=True,
**kwargs): # noqa
return numba.jit(nopython=nopython, nogil=nogil, fastmath=fastmath,
cache=cache, **kwargs)
except ImportError:
has_numba = False
else:
has_numba = (os.getenv('MNE_USE_NUMBA', 'true').lower() == 'true')
if not has_numba:
def jit(**kwargs): # noqa
def _jit(func):
return func
return _jit
prange = range
bincount = np.bincount
mean = np.mean
else:
@jit()
def bincount(x, weights, minlength): # noqa: D103
out = np.zeros(minlength)
for idx, w in zip(x, weights):
out[idx] += w
return out
# fix because Numba does not support axis kwarg for mean
@jit()
def _np_apply_along_axis(func1d, axis, arr):
assert arr.ndim == 2
assert axis in [0, 1]
if axis == 0:
result = np.empty(arr.shape[1])
for i in range(len(result)):
result[i] = func1d(arr[:, i])
else:
result = np.empty(arr.shape[0])
for i in range(len(result)):
result[i] = func1d(arr[i, :])
return result
@jit()
def mean(array, axis):
return _np_apply_along_axis(np.mean, axis, array)
###############################################################################
# workaround: plt.close() doesn't spawn close_event on Agg backend
# (check MPL github issue #18609; scheduled to be fixed by MPL 3.4)
def _close_event(fig):
"""Force calling of the MPL figure close event."""
try:
fig.canvas.close_event()
except ValueError: # old mpl with Qt
pass # pragma: no cover
def _is_last_row(ax):
    try:
        return ax.get_subplotspec().is_last_row()  # matplotlib 3.4+
    except AttributeError:
        return ax.is_last_row()  # older matplotlib
###############################################################################
# SciPy deprecation of pinv + pinvh rcond (never worked properly anyway) in 1.7
def pinvh(a, rtol=None):
"""Compute a pseudo-inverse of a Hermitian matrix."""
s, u = np.linalg.eigh(a)
del a
if rtol is None:
rtol = s.size * np.finfo(s.dtype).eps
maxS = np.max(np.abs(s))
above_cutoff = (abs(s) > maxS * rtol)
psigma_diag = 1.0 / s[above_cutoff]
u = u[:, above_cutoff]
return (u * psigma_diag) @ u.conj().T
def pinv(a, rtol=None):
"""Compute a pseudo-inverse of a matrix."""
u, s, vh = np.linalg.svd(a, full_matrices=False)
del a
maxS = np.max(s)
if rtol is None:
rtol = max(vh.shape + u.shape) * np.finfo(u.dtype).eps
rank = np.sum(s > maxS * rtol)
u = u[:, :rank]
u /= s[:rank]
return (u @ vh[:rank]).conj().T
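# Usage sketch (illustrative): for a well-conditioned, full-rank matrix both
# helpers agree with np.linalg.pinv.
#     >>> A = np.random.RandomState(0).randn(5, 3)
#     >>> np.allclose(pinv(A), np.linalg.pinv(A))
#     True
#     >>> np.allclose(pinvh(A.T @ A), np.linalg.pinv(A.T @ A))
#     True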
| bsd-3-clause |
cbertinato/pandas | doc/source/conf.py | 1 | 23864 | #
# pandas documentation build configuration file, created by
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
import importlib
import logging
import jinja2
from sphinx.ext.autosummary import _import_by_name
from numpydoc.docscrape import NumpyDocString
logger = logging.getLogger(__name__)
# https://github.com/sphinx-doc/sphinx/pull/2325/files
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000.
sys.setrecursionlimit(5000)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.extend([
# numpy standard doc extensions
os.path.join(os.path.dirname(__file__),
'..', '../..',
'sphinxext')
])
# -- General configuration -----------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# sphinxext.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.todo',
'numpydoc', # handle NumPy documentation formatted docstrings
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.linkcode',
'nbsphinx',
'contributors', # custom pandas extension
]
exclude_patterns = ['**.ipynb_checkpoints']
try:
import nbconvert
except ImportError:
logger.warn('nbconvert not installed. Skipping notebooks.')
exclude_patterns.append('**/*.ipynb')
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
logger.warn('Pandoc not installed. Skipping notebooks.')
exclude_patterns.append('**/*.ipynb')
# sphinx_pattern can be '-api' to exclude the API pages,
# the path to a file, or a Python object
# (e.g. '10min.rst' or 'pandas.DataFrame.head')
source_path = os.path.dirname(os.path.abspath(__file__))
pattern = os.environ.get('SPHINX_PATTERN')
if pattern:
for dirname, dirs, fnames in os.walk(source_path):
for fname in fnames:
if os.path.splitext(fname)[-1] in ('.rst', '.ipynb'):
fname = os.path.relpath(os.path.join(dirname, fname),
source_path)
if (fname == 'index.rst'
and os.path.abspath(dirname) == source_path):
continue
elif pattern == '-api' and dirname == 'reference':
exclude_patterns.append(fname)
elif pattern != '-api' and fname != pattern:
exclude_patterns.append(fname)
with open(os.path.join(source_path, 'index.rst.template')) as f:
t = jinja2.Template(f.read())
with open(os.path.join(source_path, 'index.rst'), 'w') as f:
f.write(t.render(include_api=pattern is None,
single_doc=(pattern
if pattern is not None and pattern != '-api'
else None)))
autosummary_generate = True if pattern is None else ['index']
# numpydoc
numpydoc_attributes_as_param_list = False
# matplotlib plot directive
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
plot_pre_code = """import numpy as np
import pandas as pd"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']
# The suffix of source filenames.
source_suffix = [
'.rst',
]
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pandas'
copyright = '2008-2014, the pandas development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import pandas
# version = '%s r%s' % (pandas.__version__, svn_version())
version = str(pandas.__version__)
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature_with_gtoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'statsmodels.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = os.path.join(html_static_path[0], 'favicon.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# Add redirect for previously existing API pages
# each item is like `(from_old, to_new)`
# To redirect a class and all its methods, see below
# https://github.com/pandas-dev/pandas/issues/16186
moved_api_pages = [
('pandas.core.common.isnull', 'pandas.isna'),
('pandas.core.common.notnull', 'pandas.notna'),
('pandas.core.reshape.get_dummies', 'pandas.get_dummies'),
('pandas.tools.merge.concat', 'pandas.concat'),
('pandas.tools.merge.merge', 'pandas.merge'),
('pandas.tools.pivot.pivot_table', 'pandas.pivot_table'),
('pandas.tseries.tools.to_datetime', 'pandas.to_datetime'),
('pandas.io.clipboard.read_clipboard', 'pandas.read_clipboard'),
('pandas.io.excel.ExcelFile.parse', 'pandas.ExcelFile.parse'),
('pandas.io.excel.read_excel', 'pandas.read_excel'),
('pandas.io.gbq.read_gbq', 'pandas.read_gbq'),
('pandas.io.html.read_html', 'pandas.read_html'),
('pandas.io.json.read_json', 'pandas.read_json'),
('pandas.io.parsers.read_csv', 'pandas.read_csv'),
('pandas.io.parsers.read_fwf', 'pandas.read_fwf'),
('pandas.io.parsers.read_table', 'pandas.read_table'),
('pandas.io.pickle.read_pickle', 'pandas.read_pickle'),
('pandas.io.pytables.HDFStore.append', 'pandas.HDFStore.append'),
('pandas.io.pytables.HDFStore.get', 'pandas.HDFStore.get'),
('pandas.io.pytables.HDFStore.put', 'pandas.HDFStore.put'),
('pandas.io.pytables.HDFStore.select', 'pandas.HDFStore.select'),
('pandas.io.pytables.read_hdf', 'pandas.read_hdf'),
('pandas.io.sql.read_sql', 'pandas.read_sql'),
('pandas.io.sql.read_frame', 'pandas.read_frame'),
('pandas.io.sql.write_frame', 'pandas.write_frame'),
('pandas.io.stata.read_stata', 'pandas.read_stata'),
]
# Again, tuples of (from_old, to_new)
moved_classes = [
('pandas.tseries.resample.Resampler', 'pandas.core.resample.Resampler'),
('pandas.formats.style.Styler', 'pandas.io.formats.style.Styler'),
]
for old, new in moved_classes:
# the class itself...
moved_api_pages.append((old, new))
mod, classname = new.rsplit('.', 1)
klass = getattr(importlib.import_module(mod), classname)
methods = [x for x in dir(klass)
if not x.startswith('_') or x in ('__iter__', '__array__')]
for method in methods:
# ... and each of its public methods
moved_api_pages.append(
("{old}.{method}".format(old=old, method=method),
"{new}.{method}".format(new=new, method=method))
)
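# For illustration (a sample of the pairs generated by the loop above, shown
# only as a comment): the Resampler entry expands to tuples such as
#     ('pandas.tseries.resample.Resampler.mean',
#      'pandas.core.resample.Resampler.mean')
# with one pair per public method of the moved class.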
if pattern is None:
html_additional_pages = {
'generated/' + page[0]: 'api_redirect.html'
for page in moved_api_pages
}
header = """\
.. currentmodule:: pandas
.. ipython:: python
:suppress:
import numpy as np
import pandas as pd
randn = np.random.randn
np.random.seed(123456)
np.set_printoptions(precision=4, suppress=True)
pd.options.display.max_rows = 15
import os
os.chdir(r'{}')
""".format(os.path.dirname(os.path.dirname(__file__)))
html_context = {
'redirects': {old: new for old, new in moved_api_pages},
'header': header
}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas'
# -- Options for nbsphinx ------------------------------------------------
nbsphinx_allow_errors = True
# -- Options for LaTeX output --------------------------------------------
latex_elements = {}
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pandas.tex',
'pandas: powerful Python data analysis toolkit',
r'Wes McKinney\n\& PyData Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
if pattern is None:
intersphinx_mapping = {
'dateutil': ("https://dateutil.readthedocs.io/en/latest/", None),
'matplotlib': ('https://matplotlib.org/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'pandas-gbq': ('https://pandas-gbq.readthedocs.io/en/latest/', None),
'py': ('https://pylib.readthedocs.io/en/latest/', None),
'python': ('https://docs.python.org/3/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'statsmodels': ('http://www.statsmodels.org/devel/', None),
}
# extlinks alias
extlinks = {'issue': ('https://github.com/pandas-dev/pandas/issues/%s',
'GH'),
'wiki': ('https://github.com/pandas-dev/pandas/wiki/%s',
'wiki ')}
ipython_warning_is_error = False
ipython_exec_lines = [
'import numpy as np',
'import pandas as pd',
# This ensures correct rendering on system with console encoding != utf8
# (windows). It forces pandas to encode its output reprs using utf8
# wherever the docs are built. The docs' target is the browser, not
# the console, so this is fine.
'pd.options.display.encoding="utf8"'
]
# Add custom Documenter to handle attributes/methods of an AccessorProperty
# eg pandas.Series.str and pandas.Series.dt (see GH9322)
import sphinx
from sphinx.util import rpartition
from sphinx.ext.autodoc import (
Documenter, MethodDocumenter, AttributeDocumenter)
from sphinx.ext.autosummary import Autosummary
class AccessorDocumenter(MethodDocumenter):
"""
Specialized Documenter subclass for accessors.
"""
objtype = 'accessor'
directivetype = 'method'
# lower than MethodDocumenter so this is not chosen for normal methods
priority = 0.6
def format_signature(self):
# this method gives an error/warning for the accessors, therefore
# overriding it (accessor has no arguments)
return ''
class AccessorLevelDocumenter(Documenter):
"""
Specialized Documenter subclass for objects on accessor level (methods,
attributes).
"""
# This is the simple straightforward version
# modname is None, base the last elements (eg 'hour')
# and path the part before (eg 'Series.dt')
# def resolve_name(self, modname, parents, path, base):
# modname = 'pandas'
# mod_cls = path.rstrip('.')
# mod_cls = mod_cls.split('.')
#
# return modname, mod_cls + [base]
def resolve_name(self, modname, parents, path, base):
if modname is None:
if path:
mod_cls = path.rstrip('.')
else:
mod_cls = None
# if documenting a class-level object without path,
# there must be a current class, either from a parent
# auto directive ...
mod_cls = self.env.temp_data.get('autodoc:class')
# ... or from a class directive
if mod_cls is None:
mod_cls = self.env.temp_data.get('py:class')
# ... if still None, there's no way to know
if mod_cls is None:
return None, []
# HACK: this is added in comparison to ClassLevelDocumenter
# mod_cls still exists of class.accessor, so an extra
# rpartition is needed
modname, accessor = rpartition(mod_cls, '.')
modname, cls = rpartition(modname, '.')
parents = [cls, accessor]
# if the module name is still missing, get it like above
if not modname:
modname = self.env.temp_data.get('autodoc:module')
if not modname:
if sphinx.__version__ > '1.3':
modname = self.env.ref_context.get('py:module')
else:
modname = self.env.temp_data.get('py:module')
# ... else, it stays None, which means invalid
return modname, parents + [base]
class AccessorAttributeDocumenter(AccessorLevelDocumenter,
AttributeDocumenter):
objtype = 'accessorattribute'
directivetype = 'attribute'
# lower than AttributeDocumenter so this is not chosen for normal
# attributes
priority = 0.6
class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter):
objtype = 'accessormethod'
directivetype = 'method'
# lower than MethodDocumenter so this is not chosen for normal methods
priority = 0.6
class AccessorCallableDocumenter(AccessorLevelDocumenter, MethodDocumenter):
"""
    This documenter lets us remove .__call__ from the method signature for
callable accessors like Series.plot
"""
objtype = 'accessorcallable'
directivetype = 'method'
# lower than MethodDocumenter; otherwise the doc build prints warnings
priority = 0.5
def format_name(self):
return MethodDocumenter.format_name(self).rstrip('.__call__')
class PandasAutosummary(Autosummary):
"""
This alternative autosummary class lets us override the table summary for
Series.plot and DataFrame.plot in the API docs.
"""
def _replace_pandas_items(self, display_name, sig, summary, real_name):
        # this is a hack: ideally we should extract the signature from the
# .__call__ method instead of hard coding this
if display_name == 'DataFrame.plot':
sig = '([x, y, kind, ax, ....])'
summary = 'DataFrame plotting accessor and method'
elif display_name == 'Series.plot':
sig = '([kind, ax, figsize, ....])'
summary = 'Series plotting accessor and method'
return (display_name, sig, summary, real_name)
@staticmethod
def _is_deprecated(real_name):
try:
obj, parent, modname = _import_by_name(real_name)
except ImportError:
return False
doc = NumpyDocString(obj.__doc__ or '')
summary = ''.join(doc['Summary'] + doc['Extended Summary'])
return '.. deprecated::' in summary
def _add_deprecation_prefixes(self, items):
for item in items:
display_name, sig, summary, real_name = item
if self._is_deprecated(real_name):
summary = '(DEPRECATED) %s' % summary
yield display_name, sig, summary, real_name
def get_items(self, names):
items = Autosummary.get_items(self, names)
items = [self._replace_pandas_items(*item) for item in items]
items = list(self._add_deprecation_prefixes(items))
return items
# based on numpy doc/source/conf.py
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except AttributeError:
return None
try:
# inspect.unwrap() was added in Python version 3.4
if sys.version_info >= (3, 5):
fn = inspect.getsourcefile(inspect.unwrap(obj))
else:
fn = inspect.getsourcefile(obj)
except TypeError:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
lineno = None
if lineno:
linespec = "#L{:d}-L{:d}".format(lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
if '+' in pandas.__version__:
return ("http://github.com/pandas-dev/pandas/blob/master/pandas/"
"{}{}".format(fn, linespec))
else:
return ("http://github.com/pandas-dev/pandas/blob/"
"v{}/pandas/{}{}".format(pandas.__version__, fn, linespec))
# remove the docstring of the flags attribute (inherited from numpy ndarray)
# because these give doc build errors (see GH issue 5331)
def remove_flags_docstring(app, what, name, obj, options, lines):
if what == "attribute" and name.endswith(".flags"):
del lines[:]
def process_class_docstrings(app, what, name, obj, options, lines):
"""
For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring.
"""
if what == "class":
joined = '\n'.join(lines)
templates = [
""".. rubric:: Attributes
.. autosummary::
:toctree:
None
""",
""".. rubric:: Methods
.. autosummary::
:toctree:
None
"""
]
for template in templates:
if template in joined:
joined = joined.replace(template, '')
lines[:] = joined.split('\n')
suppress_warnings = [
# We "overwrite" autosummary with our PandasAutosummary, but
# still want the regular autosummary setup to run. So we just
# suppress this warning.
'app.add_directive'
]
if pattern:
    # When building a single document we don't want to warn because references
    # to other documents are unknown, which is expected
suppress_warnings.append('ref.ref')
def rstjinja(app, docname, source):
"""
Render our pages as a jinja template for fancy templating goodness.
"""
# http://ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
src = source[0]
rendered = app.builder.templates.render_string(
src, app.config.html_context
)
source[0] = rendered
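# Hypothetical illustration (not part of the original conf.py): with the
# "source-read" hook above, an .rst page can branch on ``html_context`` values,
# assuming a key such as ``include_api`` has been defined there, e.g.:
#   {% if include_api %}
#   .. toctree::
#      api
#   {% endif %}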
def setup(app):
app.connect("source-read", rstjinja)
app.connect("autodoc-process-docstring", remove_flags_docstring)
app.connect("autodoc-process-docstring", process_class_docstrings)
app.add_autodocumenter(AccessorDocumenter)
app.add_autodocumenter(AccessorAttributeDocumenter)
app.add_autodocumenter(AccessorMethodDocumenter)
app.add_autodocumenter(AccessorCallableDocumenter)
app.add_directive('autosummary', PandasAutosummary)
| bsd-3-clause |
jangorecki/h2o-3 | h2o-py/tests/testdir_misc/pyunit_as_data_frame.py | 6 | 1667 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def pyunit_as_data_frame():
smallbike = h2o.import_file(pyunit_utils.locate("smalldata/jira/citibike_head.csv"))
##use_pandas = False
small_bike_list = smallbike.as_data_frame(use_pandas=False)
assert isinstance(small_bike_list, list)
assert len(small_bike_list[0]) == smallbike.ncol
assert len(small_bike_list) == smallbike.nrow + 1 #one extra for header
smallbike_noheader = smallbike.as_data_frame(use_pandas=False, header=False)
assert len(smallbike_noheader) == smallbike.nrow
head_small_bike = smallbike.head(rows=5, cols=2)
tail_small_bike = smallbike.tail(rows=5, cols=2)
assert len(head_small_bike[0]) == len(tail_small_bike[0]) == 5
assert len(head_small_bike) == len(tail_small_bike) == 5
# assert head_small_bike[1][-1] == tail_small_bike[1][1] # TODO
##use_pandas = True
small_bike_pandas = smallbike.as_data_frame(use_pandas=True)
assert small_bike_pandas.__class__.__name__ == "DataFrame"
assert small_bike_pandas.shape == (smallbike.nrow, smallbike.ncol)
head_small_bike_pandas = smallbike.head(rows=5).as_data_frame(True)
tail_small_bike_pandas = smallbike.tail(rows=5).as_data_frame(True)
assert head_small_bike_pandas.shape == tail_small_bike_pandas.shape == (5,smallbike.ncol)
assert head_small_bike_pandas.loc[1][2] == small_bike_pandas.loc[1][2]
assert tail_small_bike_pandas.loc[2][3] == small_bike_pandas.loc[6][3]
assert head_small_bike_pandas.loc[4][0] == tail_small_bike_pandas.loc[0][0]
if __name__ == "__main__":
pyunit_utils.standalone_test(pyunit_as_data_frame)
else:
pyunit_as_data_frame() | apache-2.0 |
amolkahat/pandas | pandas/core/computation/expressions.py | 2 | 7043 | """
Expressions
-----------
Offer fast expression evaluation through numexpr
"""
import warnings
import numpy as np
import pandas.core.common as com
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.core.config import get_option
if _NUMEXPR_INSTALLED:
import numexpr as ne
_TEST_MODE = None
_TEST_RESULT = None
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
_where = None
# the set of dtypes that we will allow to pass to numexpr
_ALLOWED_DTYPES = {
'evaluate': {'int64', 'int32', 'float64', 'float32', 'bool'},
'where': {'int64', 'float64', 'bool'}
}
# the minimum number of elements for which we will use numexpr
_MIN_ELEMENTS = 10000
def set_use_numexpr(v=True):
# set/unset to use numexpr
global _USE_NUMEXPR
if _NUMEXPR_INSTALLED:
_USE_NUMEXPR = v
# choose what we are going to do
global _evaluate, _where
if not _USE_NUMEXPR:
_evaluate = _evaluate_standard
_where = _where_standard
else:
_evaluate = _evaluate_numexpr
_where = _where_numexpr
def set_numexpr_threads(n=None):
# if we are using numexpr, set the threads to n
# otherwise reset
if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n)
def _evaluate_standard(op, op_str, a, b, **eval_kwargs):
""" standard evaluation """
if _TEST_MODE:
_store_test_result(False)
with np.errstate(all='ignore'):
return op(a, b)
def _can_use_numexpr(op, op_str, a, b, dtype_check):
""" return a boolean if we WILL be using numexpr """
if op_str is not None:
# required min elements (otherwise we are adding overhead)
if np.prod(a.shape) > _MIN_ELEMENTS:
# check for dtype compatibility
dtypes = set()
for o in [a, b]:
if hasattr(o, 'get_dtype_counts'):
s = o.get_dtype_counts()
if len(s) > 1:
return False
dtypes |= set(s.index)
elif isinstance(o, np.ndarray):
dtypes |= {o.dtype.name}
# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
return True
return False
def _evaluate_numexpr(op, op_str, a, b, truediv=True,
reversed=False, **eval_kwargs):
result = None
if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
try:
# we were originally called by a reversed op
# method
if reversed:
a, b = b, a
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
result = ne.evaluate('a_value {op} b_value'.format(op=op_str),
local_dict={'a_value': a_value,
'b_value': b_value},
casting='safe', truediv=truediv,
**eval_kwargs)
except ValueError as detail:
if 'unknown type object' in str(detail):
pass
if _TEST_MODE:
_store_test_result(result is not None)
if result is None:
result = _evaluate_standard(op, op_str, a, b)
return result
def _where_standard(cond, a, b):
return np.where(com.values_from_object(cond), com.values_from_object(a),
com.values_from_object(b))
def _where_numexpr(cond, a, b):
result = None
if _can_use_numexpr(None, 'where', a, b, 'where'):
try:
cond_value = getattr(cond, 'values', cond)
a_value = getattr(a, 'values', a)
b_value = getattr(b, 'values', b)
result = ne.evaluate('where(cond_value, a_value, b_value)',
local_dict={'cond_value': cond_value,
'a_value': a_value,
'b_value': b_value},
casting='safe')
except ValueError as detail:
if 'unknown type object' in str(detail):
pass
except Exception as detail:
raise TypeError(str(detail))
if result is None:
result = _where_standard(cond, a, b)
return result
# turn myself on
set_use_numexpr(get_option('compute.use_numexpr'))
def _has_bool_dtype(x):
try:
return x.dtype == bool
except AttributeError:
try:
return 'bool' in x.dtypes
except AttributeError:
return isinstance(x, (bool, np.bool_))
def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('/', '//', '**')),
unsupported=None):
if unsupported is None:
unsupported = {'+': '|', '*': '&', '-': '^'}
if _has_bool_dtype(a) and _has_bool_dtype(b):
if op_str in unsupported:
warnings.warn("evaluating in Python space because the {op!r} "
"operator is not supported by numexpr for "
"the bool dtype, use {alt_op!r} instead"
.format(op=op_str, alt_op=unsupported[op_str]))
return False
if op_str in not_allowed:
raise NotImplementedError("operator {op!r} not implemented for "
"bool dtypes".format(op=op_str))
return True
def evaluate(op, op_str, a, b, use_numexpr=True,
**eval_kwargs):
""" evaluate and return the expression of the op on a and b
Parameters
----------
op : the actual operand
op_str: the string version of the op
a : left operand
b : right operand
use_numexpr : whether to try to use numexpr (default True)
"""
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b, **eval_kwargs)
return _evaluate_standard(op, op_str, a, b)
def where(cond, a, b, use_numexpr=True):
""" evaluate the where condition cond on a and b
Parameters
----------
cond : a boolean array
a : return if cond is True
b : return if cond is False
use_numexpr : whether to try to use numexpr (default True)
"""
if use_numexpr:
return _where(cond, a, b)
return _where_standard(cond, a, b)
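# Hypothetical usage sketch (not part of the original module): ``evaluate``
# dispatches to numexpr only when both operands are large enough and use
# allowed dtypes, otherwise it falls back to the standard NumPy path.
#   import operator
#   import pandas as pd
#   a = pd.DataFrame(np.random.randn(20000, 2))
#   b = pd.DataFrame(np.random.randn(20000, 2))
#   summed = evaluate(operator.add, '+', a, b)   # may use numexpr
#   picked = where(a > 0, a, b)                  # numexpr where() if eligible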
def set_test_mode(v=True):
"""
Keeps track of whether numexpr was used. Stores an additional ``True``
for every successful use of evaluate with numexpr since the last
``get_test_result``
"""
global _TEST_MODE, _TEST_RESULT
_TEST_MODE = v
_TEST_RESULT = []
def _store_test_result(used_numexpr):
global _TEST_RESULT
if used_numexpr:
_TEST_RESULT.append(used_numexpr)
def get_test_result():
"""get test result and reset test_results"""
global _TEST_RESULT
res = _TEST_RESULT
_TEST_RESULT = []
return res
| bsd-3-clause |
jlegendary/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
darcamo/pyphysim | apps/ia/ia_SINRs_and_capacity.py | 1 | 5134 | #!/usr/bin/env python
"""module docstring"""
try:
import cPickle as pickle
except ImportError as e: # pragma: no cover
import pickle
import numpy as np
from matplotlib import pyplot as plt
from pandas import DataFrame
import pyphysim.channels.multiuser
from pyphysim.ia.algorithms import (AlternatingMinIASolver, MaxSinrIASolver,
MMSEIASolver)
from pyphysim.progressbar import ProgressbarText
from pyphysim.util.conversion import dB2Linear
def calc_SINRs_and_capacity(solver):
"""
    Calculates the SINRs (in dB and linear scale) and the per-user and sum capacities.
Parameters
----------
solver : T < IASolverBaseClass
The IA solver.
"""
SINRs = solver.calc_SINR_in_dB()
sinrs = solver.calc_SINR()
    calc_capacity = lambda sinr: np.sum(np.log2(1 + sinr))
capacity = np.array(list(map(calc_capacity, sinrs)))
sum_capacity = np.sum(capacity)
return SINRs, capacity, sum_capacity
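# Worked example of the capacity computation above (illustrative values only):
# a user with per-stream SINRs of [3.0, 1.0] has capacity
# log2(1 + 3) + log2(1 + 1) = 2 + 1 = 3 bits/s/Hz, and the sum capacity adds
# this quantity over all K users.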
if __name__ == '__main__':
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
K = 3
Nr = 4
Nt = 4
Ns = 2
SNR = 5
max_iterations = 2000
P = 1.0
initialize_with = 'alt_min'
# ---------------------------------------------------------------------
noise_var = 1. / dB2Linear(SNR)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
multiuserchannel = pyphysim.channels.multiuser.MultiUserChannelMatrix()
multiuserchannel.randomize(Nr, Nt, K)
multiuserchannel.noise_var = noise_var
alt_min_solver = AlternatingMinIASolver(multiuserchannel)
alt_min_solver.max_iterations = max_iterations
# alt_min_solver.noise_var = noise_var
max_sinr_solver = MaxSinrIASolver(multiuserchannel)
max_sinr_solver.max_iterations = max_iterations
# max_sinr_solver.noise_var = noise_var
max_sinr_solver.initialize_with = 'alt_min'
mmse_solver = MMSEIASolver(multiuserchannel)
mmse_solver.max_iterations = max_iterations
# mmse_solver.noise_var = noise_var
mmse_solver.initialize_with = 'alt_min'
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    calc_capacity = lambda sinr: np.sum(np.log2(1 + sinr))
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
rep_max = 100
alt_min_SINRs = np.empty(rep_max, dtype=np.ndarray)
max_sinr_SINRs = np.empty(rep_max, dtype=np.ndarray)
mmse_SINRs = np.empty(rep_max, dtype=np.ndarray)
alt_min_capacity = np.empty(rep_max, dtype=np.ndarray)
max_sinr_capacity = np.empty(rep_max, dtype=np.ndarray)
mmse_capacity = np.empty(rep_max, dtype=np.ndarray)
alt_min_sum_capacity = np.empty(rep_max, dtype=float)
max_sinr_sum_capacity = np.empty(rep_max, dtype=float)
mmse_sum_capacity = np.empty(rep_max, dtype=float)
alt_min_runned_iterations = np.empty(rep_max, dtype=int)
max_sinr_runned_iterations = np.empty(rep_max, dtype=int)
mmse_runned_iterations = np.empty(rep_max, dtype=int)
pbar = ProgressbarText(rep_max,
message="Simulating for SNR: {0}".format(SNR))
for rep in range(rep_max):
multiuserchannel.randomize(Nr, Nt, K)
alt_min_solver.clear()
max_sinr_solver.clear()
mmse_solver.clear()
alt_min_runned_iterations[rep] = alt_min_solver.solve(Ns, P)
max_sinr_runned_iterations[rep] = max_sinr_solver.solve(Ns, P)
mmse_runned_iterations[rep] = mmse_solver.solve(Ns, P)
mmse_solver.calc_sum_capacity()
# print "Alt Min"
(alt_min_SINRs[rep], alt_min_capacity[rep],
alt_min_sum_capacity[rep]) = calc_SINRs_and_capacity(alt_min_solver)
# print "SINRs:\n{0}".format(alt_min_SINRs[rep])
# print "Capacity:\n{0}".format(alt_min_capacity[rep])
# print "Sum_Capacity: {0}".format(alt_min_sum_capacity[rep])
# print "\nMax SINR"
(max_sinr_SINRs[rep], max_sinr_capacity[rep],
max_sinr_sum_capacity[rep]) = calc_SINRs_and_capacity(max_sinr_solver)
# print "SINRs:\n{0}".format(max_sinr_SINRs[rep])
# print "Capacity:\n{0}".format(max_sinr_capacity[rep])
# print "Sum_Capacity: {0}".format(max_sinr_sum_capacity[rep])
# print "\nMMSE"
(mmse_SINRs[rep], mmse_capacity[rep],
mmse_sum_capacity[rep]) = calc_SINRs_and_capacity(mmse_solver)
# print "SINRs:\n{0}".format(mmse_SINRs[rep])
# print "Capacity:\n{0}".format(mmse_capacity[rep])
# print "Sum_Capacity: {0}".format(mmse_sum_capacity[rep])
pbar.progress(rep)
df = DataFrame({
'Min. Leakage': alt_min_sum_capacity,
'Max SINR': max_sinr_sum_capacity,
'MMSE': mmse_sum_capacity
})
df.to_csv(
'sum_capacity_{Nr}x{Nt}_{Ns}_SNR_{SNR}_{initialize_with}_init.txt'.
format(Nr=Nr, Ns=Ns, Nt=Nt, SNR=SNR, initialize_with=initialize_with),
index_label="Index")
plt.plot([sum(alt_min_capacity[a]) for a in range(50)])
plt.plot([sum(max_sinr_capacity[a]) for a in range(50)])
plt.plot([sum(mmse_capacity[a]) for a in range(50)])
plt.legend(["Min Leakage", "Max SINR", "MMSE"])
| gpl-2.0 |
great-expectations/great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_match_regex_list.py | 1 | 2995 | import logging
import pandas as pd
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.import_manager import sa
from great_expectations.expectations.metrics.map_metric import (
ColumnMapMetricProvider,
column_condition_partial,
)
from great_expectations.expectations.metrics.util import get_dialect_regex_expression
logger = logging.getLogger(__name__)
class ColumnValuesMatchRegexList(ColumnMapMetricProvider):
condition_metric_name = "column_values.match_regex_list"
condition_value_keys = (
"regex_list",
"match_on",
)
default_kwarg_values = {"match_on": "any"}
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, regex_list, match_on, **kwargs):
regex_matches = []
for regex in regex_list:
regex_matches.append(column.astype(str).str.contains(regex))
regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)
if match_on == "any":
result = regex_match_df.any(axis="columns")
elif match_on == "all":
result = regex_match_df.all(axis="columns")
else:
raise ValueError("match_on must be either 'any' or 'all'")
return result
@column_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column, regex_list, match_on, _dialect, **kwargs):
if match_on not in ["any", "all"]:
raise ValueError("match_on must be any or all")
if len(regex_list) == 0:
raise ValueError("At least one regex must be supplied in the regex_list.")
regex_expression = get_dialect_regex_expression(column, regex_list[0], _dialect)
if regex_expression is None:
logger.warning("Regex is not supported for dialect %s" % str(_dialect))
raise NotImplementedError
if match_on == "any":
condition = sa.or_(
*[
get_dialect_regex_expression(column, regex, _dialect)
for regex in regex_list
]
)
else:
condition = sa.and_(
*[
get_dialect_regex_expression(column, regex, _dialect)
for regex in regex_list
]
)
return condition
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, regex_list, match_on, **kwargs):
if match_on == "any":
return column.rlike("|".join(regex_list))
elif match_on == "all":
formatted_regex_list = ["(?={})".format(regex) for regex in regex_list]
return column.rlike("".join(formatted_regex_list))
else:
raise ValueError("match_on must be either 'any' or 'all'")
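# Hypothetical usage sketch (assumed, not part of this module): this provider
# backs the ``expect_column_values_to_match_regex_list`` expectation, e.g.
#   validator.expect_column_values_to_match_regex_list(
#       column="product_code",
#       regex_list=[r"^A-\d+$", r"^B-\d+$"],
#       match_on="any",
#   )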
| apache-2.0 |
kaiodt/nanook | controlador_trajetoria/senoide_klancar_odometria/plot_trajetoria_senoide_klancar_odometria.py | 1 | 6383 | #! /usr/bin/env python
# coding: utf-8
###########################################################################################
###########################################################################################
## Project: Nanook UFC
## Author: Kaio Douglas Teófilo Rocha
## Email: [email protected]
###########################################################################################
## File: Trajectory Controller Test Plotter (Sinusoid - Klancar - Odometry)
###########################################################################################
###########################################################################################
from os.path import expanduser
import matplotlib.pyplot as plt
###########################################################################################
### INITIALIZATION
###########################################################################################
# Test run number
ensaio = int(raw_input('Número do Ensaio: '))
# Data file
home = expanduser('~')
path = home + '/ros_catkin_ws/src/nanook/controlador_trajetoria'
path += '/senoide_klancar_odometria/resultados/ensaio_%d.txt' % ensaio
# Open the file
data_file = open(path, 'r')
# Lists holding the data for each collected variable
samples = []            # Sample index
time_list = []          # Time [s]
x_ref_list = []         # Position reference along the x axis [m]
y_ref_list = []         # Position reference along the y axis [m]
theta_ref_list = []     # Orientation reference [rad]
v_ref_list = []         # Linear velocity reference of the base [m/s]
w_ref_list = []         # Angular velocity reference of the base [rad/s]
x_real_list = []        # Actual position along the x axis [m]
y_real_list = []        # Actual position along the y axis [m]
theta_real_list = []    # Actual orientation [rad]
x_est_list = []         # Estimated position along the x axis [m]
y_est_list = []         # Estimated position along the y axis [m]
theta_est_list = []     # Estimated orientation [rad]
v_list = []             # Linear velocity of the base [m/s]
w_list = []             # Angular velocity of the base [rad/s]
u_v_list = []           # Linear velocity command of the base [m/s]
u_w_list = []           # Angular velocity command of the base [rad/s]
x_error_list = []       # Position error along the x axis [m]
y_error_list = []       # Position error along the y axis [m]
theta_error_list = []   # Orientation error [rad]
###########################################################################################
### FILE READING
###########################################################################################
# Print lines containing test information (starting with #) and store lines
# containing data (starting with *) in the corresponding lists
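# A data line is expected to carry the '*' marker followed by the sample index,
# the time stamp and the 18 signals listed above (hypothetical example):
# * 1 0.05 0.0 0.0 0.0 0.1 0.0 ...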
for line in data_file:
    if '*' not in line:    # Line with test information
print line
    else:                  # Line with data
line = line.split()
samples.append(int(line[1]))
time_list.append(float(line[2]))
x_ref_list.append(float(line[3]))
y_ref_list.append(float(line[4]))
theta_ref_list.append(float(line[5]))
v_ref_list.append(float(line[6]))
w_ref_list.append(float(line[7]))
x_real_list.append(float(line[8]))
y_real_list.append(float(line[9]))
theta_real_list.append(float(line[10]))
x_est_list.append(float(line[11]))
y_est_list.append(float(line[12]))
theta_est_list.append(float(line[13]))
v_list.append(float(line[14]))
w_list.append(float(line[15]))
u_v_list.append(float(line[16]))
u_w_list.append(float(line[17]))
x_error_list.append(float(line[18]))
y_error_list.append(float(line[19]))
theta_error_list.append(float(line[20]))
# Close the file
data_file.close()
###########################################################################################
### PLOT GENERATION
###########################################################################################
plt.close('all')
# Pose
plt.figure(1)
# Position along the y axis [m] | Position along the x axis [m]
plt.subplot(2, 2, 1)
plt.plot(x_ref_list, y_ref_list, 'r--', x_est_list, y_est_list, 'b-',
x_real_list, y_real_list, 'g-.')
plt.title('Trajetoria XY')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.grid('on')
# Position along the x axis [m] | Time [s]
plt.subplot(2, 2, 2)
plt.plot(time_list, x_ref_list, 'r--', time_list, x_est_list, 'b-',
time_list, x_real_list, 'g-.')
plt.title('Posicao no Eixo X')
plt.xlabel('Tempo (s)')
plt.ylabel('x (m)')
plt.grid('on')
# Orientation [rad] | Time [s]
plt.subplot(2, 2, 3)
plt.plot(time_list, theta_ref_list, 'r--', time_list, theta_est_list, 'b-',
time_list, theta_real_list, 'g-.')
plt.title('Orientacao')
plt.xlabel('Tempo (s)')
plt.ylabel('theta (rad)')
plt.grid('on')
# Position along the y axis [m] | Time [s]
plt.subplot(2, 2, 4)
plt.plot(time_list, y_ref_list, 'r--', time_list, y_est_list, 'b-',
time_list, y_real_list, 'g-.')
plt.title('Posicao no Eixo Y')
plt.xlabel('Tempo (s)')
plt.ylabel('y (m)')
plt.grid('on')
# Velocity
plt.figure(2)
# Linear velocity [m/s] | Time [s]
plt.subplot(1, 2, 1)
plt.plot(time_list, u_v_list, 'r--', time_list, v_list, 'b-')
plt.title('Velocidade Linear')
plt.xlabel('Tempo (s)')
plt.ylabel('v (m/s)')
plt.grid('on')
# Angular velocity [rad/s] | Time [s]
plt.subplot(1, 2, 2)
plt.plot(time_list, u_w_list, 'r--', time_list, w_list, 'b-')
plt.title('Velocidade Angular')
plt.xlabel('Tempo (s)')
plt.ylabel('w (rad/s)')
plt.grid('on')
# Errors
plt.figure(3)
# Position error along the x axis [m] | Time [s]
plt.subplot(2, 2, 1)
plt.plot(time_list, x_error_list, 'b-')
plt.title('Erro de Posicao no Eixo x')
plt.xlabel('Tempo (s)')
plt.ylabel('erro_x (m)')
plt.grid('on')
# Position error along the y axis [m] | Time [s]
plt.subplot(2, 2, 2)
plt.plot(time_list, y_error_list, 'b-')
plt.title('Erro de Posicao no Eixo y')
plt.xlabel('Tempo (s)')
plt.ylabel('erro_y (m)')
plt.grid('on')
# Orientation error [rad] | Time [s]
plt.subplot(2, 2, 3)
plt.plot(time_list, theta_error_list, 'b-')
plt.title('Erro de Orientacao')
plt.xlabel('Tempo (s)')
plt.ylabel('erro_theta (rad)')
plt.grid('on')
# Show the plots
plt.show() | gpl-3.0 |
cjayb/mne-python | examples/preprocessing/plot_shift_evoked.py | 29 | 1245 | """
==================================
Shifting time-scale in evoked data
==================================
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import tight_layout
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading evoked data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0),
proj=True)
ch_names = evoked.info['ch_names']
picks = mne.pick_channels(ch_names=ch_names, include=["MEG 2332"])
# Create subplots
f, (ax1, ax2, ax3) = plt.subplots(3)
evoked.plot(exclude=[], picks=picks, axes=ax1,
titles=dict(grad='Before time shifting'), time_unit='s')
# Apply relative time-shift of 500 ms
evoked.shift_time(0.5, relative=True)
evoked.plot(exclude=[], picks=picks, axes=ax2,
titles=dict(grad='Relative shift: 500 ms'), time_unit='s')
# Apply absolute time-shift of 500 ms
evoked.shift_time(0.5, relative=False)
evoked.plot(exclude=[], picks=picks, axes=ax3,
titles=dict(grad='Absolute shift: 500 ms'), time_unit='s')
tight_layout()
| bsd-3-clause |
cosmoharrigan/pylearn2 | pylearn2/scripts/datasets/browse_small_norb.py | 44 | 6901 | #!/usr/bin/env python
import sys
import argparse
import pickle
import warnings
import exceptions
import numpy
try:
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
pyplot = None
from pylearn2.datasets import norb
warnings.warn("This script is deprecated. Please use ./browse_norb.py "
"instead. It is kept around as a tester for deprecated class "
"datasets.norb.SmallNORB",
exceptions.DeprecationWarning)
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Browser for SmallNORB dataset.")
parser.add_argument('--which_set',
default='train',
help="'train', 'test', or the path to a .pkl file")
parser.add_argument('--zca',
default=None,
help=("if --which_set points to a .pkl "
"file storing a ZCA-preprocessed "
"NORB dataset, you can optionally "
"enter the preprocessor's .pkl "
"file path here to undo the "
"ZCA'ing for visualization "
"purposes."))
return parser.parse_args()
def get_data(args):
if args.which_set in ('train', 'test'):
dataset = norb.SmallNORB(args.which_set, True)
else:
with open(args.which_set) as norb_file:
dataset = pickle.load(norb_file)
if len(dataset.y.shape) < 2 or dataset.y.shape[1] == 1:
print("This viewer does not support NORB datasets that "
"only have classification labels.")
sys.exit(1)
if args.zca is not None:
with open(args.zca) as zca_file:
zca = pickle.load(zca_file)
dataset.X = zca.inverse(dataset.X)
num_examples = dataset.X.shape[0]
topo_shape = ((num_examples, ) +
tuple(dataset.view_converter.shape))
assert topo_shape[-1] == 1
topo_shape = topo_shape[:-1]
values = dataset.X.reshape(topo_shape)
labels = numpy.array(dataset.y, 'int')
return values, labels, dataset.which_set
args = parse_args()
values, labels, which_set = get_data(args)
# For programming convenience, internally remap the instance labels to be
# 0-4, and the azimuth labels to be 0-17. The user will still only see the
# original, unmodified label values.
instance_index = norb.SmallNORB.label_type_to_index['instance']
def remap_instances(which_set, labels):
if which_set == 'train':
new_to_old_instance = [4, 6, 7, 8, 9]
elif which_set == 'test':
new_to_old_instance = [0, 1, 2, 3, 5]
num_instances = len(new_to_old_instance)
old_to_new_instance = numpy.ndarray(10, 'int')
old_to_new_instance.fill(-1)
old_to_new_instance[new_to_old_instance] = numpy.arange(num_instances)
instance_slice = numpy.index_exp[:, instance_index]
old_instances = labels[instance_slice]
new_instances = old_to_new_instance[old_instances]
labels[instance_slice] = new_instances
azimuth_index = norb.SmallNORB.label_type_to_index['azimuth']
azimuth_slice = numpy.index_exp[:, azimuth_index]
labels[azimuth_slice] = labels[azimuth_slice] / 2
return new_to_old_instance
new_to_old_instance = remap_instances(which_set, labels)
def get_new_azimuth_degrees(scalar_label):
return 20 * scalar_label
# Maps a label vector to the corresponding index in <values>
num_labels_by_type = numpy.array(norb.SmallNORB.num_labels_by_type, 'int')
num_labels_by_type[instance_index] = len(new_to_old_instance)
label_to_index = numpy.ndarray(num_labels_by_type, 'int')
label_to_index.fill(-1)
for i, label in enumerate(labels):
label_to_index[tuple(label)] = i
assert not numpy.any(label_to_index == -1) # all elements have been set
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.canvas.set_window_title('Small NORB dataset (%sing set)' %
which_set)
# shift subplots down to make more room for the text
figure.subplots_adjust(bottom=0.05)
num_label_types = len(norb.SmallNORB.num_labels_by_type)
current_labels = numpy.zeros(num_label_types, 'int')
current_label_type = [0, ]
label_text = figure.suptitle("title text",
x=0.1,
horizontalalignment="left")
def redraw(redraw_text, redraw_images):
if redraw_text:
cl = current_labels
lines = [
'category: %s' % norb.SmallNORB.get_category(cl[0]),
'instance: %d' % new_to_old_instance[cl[1]],
'elevation: %d' % norb.SmallNORB.get_elevation_degrees(cl[2]),
'azimuth: %d' % get_new_azimuth_degrees(cl[3]),
'lighting: %d' % cl[4]]
lt = current_label_type[0]
lines[lt] = '==> ' + lines[lt]
text = ('Up/down arrows choose label, left/right arrows change it'
'\n\n' +
'\n'.join(lines))
label_text.set_text(text)
if redraw_images:
index = label_to_index[tuple(current_labels)]
image_pair = values[index, :, :, :]
for i in range(2):
axes[i].imshow(image_pair[i, :, :], cmap='gray')
figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_label_type(step):
current_label_type[0] = add_mod(current_label_type[0],
step,
num_label_types)
def incr_label(step):
lt = current_label_type[0]
num_labels = num_labels_by_type[lt]
current_labels[lt] = add_mod(current_labels[lt], step, num_labels)
if event.key == 'up':
incr_label_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_label_type(1)
redraw(True, False)
elif event.key == 'left':
incr_label(-1)
redraw(True, True)
elif event.key == 'right':
incr_label(1)
redraw(True, True)
elif event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
sperka/shogun | applications/easysvm/tutpaper/svm_params.py | 26 | 12935 |
#from matplotlib import rc
#rc('text', usetex=True)
fontsize = 16
contourFontsize = 12
showColorbar = False
xmin = -1
xmax = 1
ymin = -1.05
ymax = 1
import sys,os
import numpy
import shogun
from shogun.Kernel import GaussianKernel, LinearKernel, PolyKernel
from shogun.Features import RealFeatures, BinaryLabels
from shogun.Classifier import LibSVM
from numpy import arange
import matplotlib
from matplotlib import pylab
pylab.rcParams['contour.negative_linestyle'] = 'solid'
def features_from_file(fileName) :
fileHandle = open(fileName)
fileHandle.readline()
features = []
labels = []
for line in fileHandle :
tokens = line.split(',')
labels.append(float(tokens[1]))
features.append([float(token) for token in tokens[2:]])
return RealFeatures(numpy.transpose(numpy.array(features))), features, BinaryLabels(numpy.array(labels,numpy.float))
def create_kernel(kname, features, kparam=None) :
if kname == 'gauss' :
kernel = GaussianKernel(features, features, kparam)
elif kname == 'linear':
kernel = LinearKernel(features, features)
elif kname == 'poly' :
kernel = PolyKernel(features, features, kparam, True, False)
return kernel
def svm_train(kernel, labels, C1, C2=None):
"""Trains a SVM with the given kernel"""
num_threads = 1
kernel.io.disable_progress()
svm = LibSVM(C1, kernel, labels)
if C2:
svm.set_C(C1, C2)
svm.parallel.set_num_threads(num_threads)
svm.io.disable_progress()
svm.train()
return svm
def svm_test(svm, kernel, features_train, features_test) :
"""predicts on the test examples"""
kernel.init(features_train, features_test)
output = svm.apply().get_labels()
return output
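# Minimal usage sketch of the helpers above (illustrative only; the kernel width
# and regularization constant are arbitrary):
#   features, vectors, labels = features_from_file('data/small_gc_toy.data')
#   kernel = create_kernel('gauss', features, 1.0)
#   svm = svm_train(kernel, labels, C1=10)
#   outputs = svm_test(svm, kernel, features, features)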
def decision_boundary_plot(svm, features, vectors, labels, kernel, fileName = None, **args) :
title = None
if 'title' in args :
title = args['title']
xlabel = None
if 'xlabel' in args :
xlabel = args['xlabel']
ylabel = None
if 'ylabel' in args :
ylabel = args['ylabel']
fontsize = 'medium'
if 'fontsize' in args :
fontsize = args['fontsize']
contourFontsize = 10
if 'contourFontsize' in args :
contourFontsize = args['contourFontsize']
showColorbar = True
if 'showColorbar' in args :
showColorbar = args['showColorbar']
show = True
if fileName is not None :
show = False
if 'show' in args :
show = args['show']
# setting up the grid
delta = 0.005
x = arange(xmin, xmax, delta)
y = arange(ymin, ymax, delta)
Z = numpy.zeros((len(x), len(y)), numpy.float_)
gridX = numpy.zeros((len(x) *len(y), 2), numpy.float_)
n = 0
for i in range(len(x)) :
for j in range(len(y)) :
gridX[n][0] = x[i]
gridX[n][1] = y[j]
n += 1
if kernel.get_name() == 'Linear' and 'customwandb' in args:
kernel.init_optimization_svm(svm)
b=svm.get_bias()
w=kernel.get_w()
kernel.set_w(args['customwandb'][0])
svm.set_bias(args['customwandb'][1])
if kernel.get_name() == 'Linear' and 'drawarrow' in args:
kernel.init_optimization_svm(svm)
b=svm.get_bias()
w=kernel.get_w()
s=1.0/numpy.dot(w,w)/1.17
pylab.arrow(0,-b/w[1], w[0]*s,s*w[1], width=0.01, fc='#dddddd', ec='k')
grid_features = RealFeatures(numpy.transpose(gridX))
results = svm_test(svm, kernel, features, grid_features)
n = 0
for i in range(len(x)) :
for j in range(len(y)) :
Z[i][j] = results[n]
n += 1
cdict = {'red' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
'green':((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
'blue' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
}
my_cmap = matplotlib.colors.LinearSegmentedColormap('lightgray',cdict,256)
im = pylab.imshow(numpy.transpose(Z),
interpolation='bilinear', origin='lower',
cmap=my_cmap, extent=(xmin,xmax,ymin,ymax) )
if 'decisionboundaryonly' in args:
C1 = pylab.contour(numpy.transpose(Z),
[0],
origin='lower',
linewidths=(3),
colors = ['k'],
extent=(xmin,xmax,ymin,ymax))
else:
C1 = pylab.contour(numpy.transpose(Z),
[-1,0,1],
origin='lower',
linewidths=(1,3,1),
colors = ['k','k'],
extent=(xmin,xmax,ymin,ymax))
pylab.clabel(C1,
inline=1,
fmt='%1.1f',
fontsize=contourFontsize)
# plot the data
lab=labels.get_labels()
vec=numpy.array(vectors)
idx=numpy.where(lab==-1)[0]
pylab.scatter(vec[idx,0], vec[idx,1], s=300, c='#4444ff', marker='o', alpha=0.8, zorder=100)
idx=numpy.where(lab==+1)[0]
pylab.scatter(vec[idx,0], vec[idx,1], s=500, c='#ff4444', marker='s', alpha=0.8, zorder=100)
# plot SVs
if not 'decisionboundaryonly' in args:
training_outputs = svm_test(svm, kernel, features, features)
sv_idx=numpy.where(abs(training_outputs)<=1.01)[0]
pylab.scatter(vec[sv_idx,0], vec[sv_idx,1], s=100, c='k', marker='o', alpha=0.8, zorder=100)
if 'showmovedpoint' in args:
x=-0.779838709677
y=-0.1375
pylab.scatter([x], [y], s=300, c='#4e4e61', marker='o', alpha=1, zorder=100, edgecolor='#454548')
pylab.arrow(x,y-0.1, 0, -0.8/1.5, width=0.01, fc='#dddddd', ec='k')
#pylab.show()
if title is not None :
pylab.title(title, fontsize=fontsize)
if ylabel:
pylab.ylabel(ylabel,fontsize=fontsize)
if xlabel:
pylab.xlabel(xlabel,fontsize=fontsize)
if showColorbar :
pylab.colorbar(im)
# colormap:
pylab.hot()
if fileName is not None :
pylab.savefig(fileName)
if show :
pylab.show()
def add_percent_ticks():
ticks=pylab.getp(pylab.gca(),'xticks')
ticklabels=len(ticks)*['']
ticklabels[0]='0%'
ticklabels[-1]='100%'
pylab.setp(pylab.gca(), xticklabels=ticklabels)
pylab.setp(pylab.gca(), yticklabels=['0%','100%'])
ticks=pylab.getp(pylab.gca(),'yticks')
ticklabels=len(ticks)*['']
#ticklabels[0]='0%'
ticklabels[-1]='100%'
pylab.setp(pylab.gca(), yticklabels=ticklabels)
xticklabels = pylab.getp(pylab.gca(), 'xticklabels')
yticklabels = pylab.getp(pylab.gca(), 'yticklabels')
pylab.setp(xticklabels, fontsize=fontsize)
pylab.setp(yticklabels, fontsize=fontsize)
def create_figures(extension = 'pdf', directory = '../../tex/figures') :
if extension[0] != '.' :
extension = '.' + extension
dpi=90
# data and linear decision boundary
features,vectors,labels = features_from_file('data/small_gc_toy.data')
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 0.7)
pylab.figure(figsize=(8,6), dpi=dpi)
decision_boundary_plot(svm, features, vectors, labels, kernel,
fontsize=fontsize, contourFontsize=contourFontsize,
title="Linear Separation", customwandb=(numpy.array([-0.05, -1.0]), -0.3),
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
show=False, showColorbar=showColorbar, decisionboundaryonly=True)
add_percent_ticks()
pylab.savefig(os.path.join(directory, 'data_and_linear_classifier' + extension))
pylab.close()
#####################################################################################
# data and svm decision boundary
features,vectors,labels = features_from_file('data/small_gc_toy.data')
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 100)
pylab.figure(figsize=(8,6), dpi=dpi)
decision_boundary_plot(svm, features, vectors, labels, kernel,
fontsize=fontsize, contourFontsize=contourFontsize,
title="Maximum Margin Separation", drawarrow=True,
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
show=False, showColorbar=showColorbar)
add_percent_ticks()
pylab.savefig(os.path.join(directory, 'data_and_svm_classifier' + extension))
pylab.close()
#####################################################################################
# the effect of C on the decision surface:
features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
pylab.figure(figsize=(16,6), dpi=dpi)
pylab.subplot(121)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 200)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Soft-Margin with C=200', ylabel="GC Content Before 'AG'",
xlabel="GC Content After 'AG'", fontsize=fontsize,
contourFontsize=contourFontsize, show=False, showmovedpoint=True,
showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(122)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 2)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Soft-Margin with C=2',
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False, showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'effect_of_c' + extension))
pylab.close()
####################################################################################
# playing with nonlinear data:
# the effect of kernel parameters
features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
pylab.figure(figsize=(24,6), dpi=dpi)
pylab.subplot(131)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Linear Kernel',
ylabel="GC Content Before 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(132)
kernel = create_kernel('poly', features, 2)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Polynomial Kernel d=2',
xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(133)
kernel = create_kernel('poly', features, 5)
svm = svm_train(kernel, labels, 10)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Polynomial Kernel d=5',
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'params_polynomial' + extension))
pylab.close()
####################################################################################
#effects of sigma
pylab.figure(figsize=(24,6), dpi=dpi)
pylab.subplot(131)
gamma = 0.1
sigma = 20.0
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=20',
ylabel="GC Content Before 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(132)
sigma = 1.0
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=1',
xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(133)
sigma = 0.05
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=0.05',
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'params_gaussian' + extension))
pylab.close()
####################################################################################
if __name__ == '__main__' :
extension = 'pdf'
if len(sys.argv) > 1 :
extension = sys.argv[1]
pylab.ioff()
create_figures(extension)
| gpl-3.0 |
themrmax/scikit-learn | sklearn/cluster/k_means_.py | 8 | 60187 | """K-means clustering"""
# Authors: Gael Varoquaux <[email protected]>
# Thomas Rueckstiess <[email protected]>
# James Bergstra <[email protected]>
# Jan Schlueter <[email protected]>
# Nelle Varoquaux
# Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..metrics.pairwise import pairwise_distances_argmin_min
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X : array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : integer
The number of seeds to choose
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : numpy.RandomState
The generator used to initialize the centers.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
    to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
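# Illustrative call of the seeding routine above (a sketch, not part of the
# original module):
#   rng = check_random_state(0)
#   X = rng.rand(100, 2)
#   centers = _k_init(X, n_clusters=3,
#                     x_squared_norms=row_norms(X, squared=True),
#                     random_state=rng)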
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
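# For example (hypothetical numbers), with tol=1e-4 and per-feature variances
# averaging 2.5, the returned absolute tolerance is 2.5 * 1e-4 = 2.5e-4.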
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
algorithm="auto", return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter : int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# Validate init array
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
    # subtract the mean of x for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_clusters == 1:
# elkan doesn't make sense for a single cluster, full will produce
# the right result.
algorithm = "full"
if algorithm == "auto":
algorithm = "full" if sp.issparse(X) else 'elkan'
if algorithm == "full":
kmeans_single = _kmeans_single_lloyd
elif algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
" %s" % str(algorithm))
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
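# The loop above keeps only the lowest-inertia result across the `n_init`
# restarts.  As a minimal, self-contained sketch of that pattern (an
# illustration, not part of the scikit-learn API), assume a hypothetical
# `single_run(X, n_clusters, seed)` callable that returns
# (labels, inertia, centers, n_iter); NumPy is available as `np` as elsewhere
# in this module.
def _example_best_of_n_init(single_run, X, n_clusters, n_init=10, seed=0):
    rng = np.random.RandomState(seed)
    best = None
    for s in rng.randint(np.iinfo(np.int32).max, size=n_init):
        labels, inertia, centers, n_iter = single_run(X, n_clusters, s)
        # keep the run with the smallest within-cluster sum of squares
        if best is None or inertia < best[1]:
            best = (labels, inertia, centers, n_iter)
    return best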
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
if sp.issparse(X):
raise ValueError("algorithm='elkan' not supported for sparse input X")
X = check_array(X, order="C")
random_state = check_random_state(random_state)
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
centers = np.ascontiguousarray(centers)
if verbose:
print('Initialization complete')
centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
max_iter=max_iter, verbose=verbose)
inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X : array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol : float, optional
        Tolerance on the change in cluster centers used to declare convergence.
verbose : boolean, optional
Verbosity mode
x_squared_norms : array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closest center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print("Converged at iteration %d: "
"center shift %e within tolerance %e"
% (i, center_shift_total, tol))
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
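# The loop in _kmeans_single_lloyd alternates an E-step (assign labels) and an
# M-step (recompute centers as cluster means) until the squared center shift
# falls below `tol`.  A minimal dense-NumPy sketch of the same alternation
# (illustrative only; it ignores sparse input, precomputed norms and empty
# clusters beyond keeping the old center):
def _example_lloyd_numpy(X, n_clusters, max_iter=300, tol=1e-4, seed=0):
    rng = np.random.RandomState(seed)
    centers = X[rng.permutation(X.shape[0])[:n_clusters]].astype(float)
    for _ in range(max_iter):
        # E-step: squared distances to every center, then nearest-center labels
        d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
        labels = d2.argmin(axis=1)
        # M-step: move each center to the mean of its assigned samples
        new_centers = np.array(
            [X[labels == k].mean(axis=0) if np.any(labels == k) else centers[k]
             for k in range(n_clusters)])
        shift = ((new_centers - centers) ** 2).sum()
        centers = new_centers
        if shift <= tol:
            break
    # final assignment and inertia against the last centers
    d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    return d2.argmin(axis=1), d2.min(axis=1).sum(), centers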
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
    X : numpy array, shape (n_samples, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# Breakup nearest neighbor distance computation into batches to prevent
# memory blowup in the case of a large number of samples and clusters.
# TODO: Once PR #7383 is merged use check_inputs=False in metric_kwargs.
labels, mindist = pairwise_distances_argmin_min(
X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
# cython k-means code assumes int32 inputs
labels = labels.astype(np.int32)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms : array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers : float array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances : float array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
    labels : int array of shape (n_samples,)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
    # set the default value of labels to -1 to be able to detect any anomaly
    # easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=X.dtype)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X : array, shape (n_samples, n_features)
k : int
number of centroids
init : {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
x_squared_norms : array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is only initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.
Returns
-------
centers : array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
# ensure that the centers have the same dtype as X
# this is a requirement of fused types of cython
centers = np.array(init, dtype=X.dtype)
elif callable(init):
centers = init(X, k, random_state=random_state)
centers = np.asarray(centers, dtype=X.dtype)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
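# _init_centroids dispatches on `init`; the 'k-means++' branch delegates to
# _k_init.  As a rough, standalone sketch of the k-means++ idea (not the
# scikit-learn implementation, which additionally evaluates several local
# candidate trials per step): pick the first center uniformly at random, then
# pick each further center with probability proportional to its squared
# distance to the nearest center already chosen.
def _example_kmeanspp_numpy(X, k, seed=0):
    rng = np.random.RandomState(seed)
    centers = [X[rng.randint(X.shape[0])]]
    for _ in range(k - 1):
        # squared distance of every sample to its closest chosen center
        d2 = np.min(((X[:, None, :] - np.asarray(centers)[None, :, :]) ** 2
                     ).sum(axis=2), axis=1)
        centers.append(X[rng.choice(X.shape[0], p=d2 / d2.sum())])
    return np.asarray(centers)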
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn.cluster import KMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
>>> kmeans.labels_
array([0, 0, 0, 1, 1, 1], dtype=int32)
>>> kmeans.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
>>> kmeans.cluster_centers_
array([[ 1., 2.],
[ 4., 2.]])
See also
--------
MiniBatchKMeans
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster than the default batch implementation.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10,
max_iter=300, tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True,
n_jobs=1, algorithm='auto'):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs, algorithm=self.algorithm,
return_n_iter=True)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
    old_center_buffer : array
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
    squared_diff : float
        Sum of squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = random_state.choice(X.shape[0], replace=False,
size=n_reassigns)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X, new_centers.astype(np.intp),
np.where(to_reassign)[0].astype(np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
# Note: numpy >= 1.10 does not support '/=' for the following
# expression for a mixture of int and float (see numpy issue #6464)
centers[center_idx] = centers[center_idx] / counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
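# The dense branch above performs an online mean per center: rescale the
# center by its previous count, add the coordinates of the new batch members,
# and divide by the updated count.  A single-center sketch of that update
# (illustrative only, not part of scikit-learn):
def _example_minibatch_center_update(center, count, batch_points):
    batch_points = np.asarray(batch_points, dtype=float)
    new_count = count + batch_points.shape[0]
    # weighted combination of the old running mean and the new batch sum
    new_center = (np.asarray(center, dtype=float) * count
                  + batch_points.sum(axis=0)) / new_count
    return new_center, new_count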
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
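# The convergence helper above smooths the per-batch statistics with an
# exponentially weighted average whose weight grows with the batch size:
# alpha = min(1, 2 * batch_size / (n_samples + 1)), and
# ewa_new = (1 - alpha) * ewa_old + alpha * observation.
# A one-line sketch of that smoothing step (illustrative only):
def _example_ewa_update(ewa_old, observation, batch_size, n_samples):
    alpha = min(1.0, 2.0 * batch_size / (n_samples + 1))
    return ewa_old * (1.0 - alpha) + observation * alpha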
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized mean of the center
        squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is only initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their nearest
        cluster center.
See also
--------
KMeans
The classic implementation of the clustering method based on the
Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C',
dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
            # using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high-dim data:
            # hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.randint(0, n_samples, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(
0, n_samples, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
# this is the first call partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, dtype=X.dtype), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
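# A short usage sketch for MiniBatchKMeans mirroring the KMeans docstring
# example above (illustrative only; the exact labels and centers can differ
# from the batch KMeans result because centers are refined from random
# mini-batches):
def _example_minibatch_usage():
    X = np.array([[1, 2], [1, 4], [1, 0],
                  [4, 2], [4, 4], [4, 0]], dtype=np.float64)
    mbk = MiniBatchKMeans(n_clusters=2, random_state=0, batch_size=6)
    mbk.fit(X)
    return mbk.cluster_centers_, mbk.predict(np.array([[0., 0.], [4., 4.]]))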
| bsd-3-clause |
zzcclp/spark | python/pyspark/pandas/utils.py | 9 | 34052 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Commonly used utils in pandas-on-Spark.
"""
import functools
from collections import OrderedDict
from contextlib import contextmanager
import os
from typing import ( # noqa: F401 (SPARK-34943)
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
Union,
TYPE_CHECKING,
cast,
no_type_check,
overload,
)
import warnings
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame, SparkSession
from pyspark.sql.types import DoubleType
import pandas as pd
from pandas.api.types import is_list_like
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas._typing import Axis, Label, Name, DataFrameOrSeries
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef.typehints import as_spark_type
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.base import IndexOpsMixin # noqa: F401 (SPARK-34943)
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.internal import InternalFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
ERROR_MESSAGE_CANNOT_COMBINE = (
"Cannot combine the series or dataframe because it comes from a different dataframe. "
"In order to allow this operation, enable 'compute.ops_on_diff_frames' option."
)
SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.pyspark.enabled"
def same_anchor(
this: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
that: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
) -> bool:
"""
Check if the anchors of the given DataFrame or Series are the same or not.
"""
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
if isinstance(this, InternalFrame):
this_internal = this
else:
assert isinstance(this, (DataFrame, IndexOpsMixin)), type(this)
this_internal = this._internal
if isinstance(that, InternalFrame):
that_internal = that
else:
assert isinstance(that, (DataFrame, IndexOpsMixin)), type(that)
that_internal = that._internal
return (
this_internal.spark_frame is that_internal.spark_frame
and this_internal.index_level == that_internal.index_level
and all(
spark_column_equals(this_scol, that_scol)
for this_scol, that_scol in zip(
this_internal.index_spark_columns, that_internal.index_spark_columns
)
)
)
def combine_frames(
this: "DataFrame",
*args: DataFrameOrSeries,
how: str = "full",
preserve_order_column: bool = False
) -> "DataFrame":
"""
This method combines `this` DataFrame with a different `that` DataFrame or
Series from a different DataFrame.
    It returns a DataFrame that has prefix `this_` and `that_` to distinguish
    the column names from both DataFrames.
It internally performs a join operation which can be expensive in general.
So, if `compute.ops_on_diff_frames` option is False,
this method throws an exception.
"""
from pyspark.pandas.config import get_option
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.series import Series
if all(isinstance(arg, Series) for arg in args):
assert all(
same_anchor(arg, args[0]) for arg in args
), "Currently only one different DataFrame (from given Series) is supported"
assert not same_anchor(this, args[0]), "We don't need to combine. All series is in this."
that = args[0]._psdf[list(args)]
elif len(args) == 1 and isinstance(args[0], DataFrame):
assert isinstance(args[0], DataFrame)
assert not same_anchor(
this, args[0]
), "We don't need to combine. `this` and `that` are same."
that = args[0]
else:
raise AssertionError("args should be single DataFrame or " "single/multiple Series")
if get_option("compute.ops_on_diff_frames"):
def resolve(internal: InternalFrame, side: str) -> InternalFrame:
rename = lambda col: "__{}_{}".format(side, col)
internal = internal.resolved_copy
sdf = internal.spark_frame
sdf = internal.spark_frame.select(
*[
scol_for(sdf, col).alias(rename(col))
for col in sdf.columns
if col not in HIDDEN_COLUMNS
],
*HIDDEN_COLUMNS
)
return internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
],
index_fields=[
field.copy(name=rename(field.name)) for field in internal.index_fields
],
data_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
],
data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
)
this_internal = resolve(this._internal, "this")
that_internal = resolve(that._internal, "that")
this_index_map = list(
zip(
this_internal.index_spark_column_names,
this_internal.index_names,
this_internal.index_fields,
)
)
that_index_map = list(
zip(
that_internal.index_spark_column_names,
that_internal.index_names,
that_internal.index_fields,
)
)
assert len(this_index_map) == len(that_index_map)
join_scols = []
merged_index_scols = []
# Note that the order of each element in index_map is guaranteed according to the index
# level.
this_and_that_index_map = list(zip(this_index_map, that_index_map))
this_sdf = this_internal.spark_frame.alias("this")
that_sdf = that_internal.spark_frame.alias("that")
# If the same named index is found, that's used.
index_column_names = []
index_use_extension_dtypes = []
for (
i,
((this_column, this_name, this_field), (that_column, that_name, that_field)),
) in enumerate(this_and_that_index_map):
if this_name == that_name:
# We should merge the Spark columns into one
# to mimic pandas' behavior.
this_scol = scol_for(this_sdf, this_column)
that_scol = scol_for(that_sdf, that_column)
join_scol = this_scol == that_scol
join_scols.append(join_scol)
column_name = SPARK_INDEX_NAME_FORMAT(i)
index_column_names.append(column_name)
index_use_extension_dtypes.append(
any(field.is_extension_dtype for field in [this_field, that_field])
)
merged_index_scols.append(
F.when(this_scol.isNotNull(), this_scol).otherwise(that_scol).alias(column_name)
)
else:
raise ValueError("Index names must be exactly matched currently.")
assert len(join_scols) > 0, "cannot join with no overlapping index names"
joined_df = this_sdf.join(that_sdf, on=join_scols, how=how)
if preserve_order_column:
order_column = [scol_for(this_sdf, NATURAL_ORDER_COLUMN_NAME)]
else:
order_column = []
joined_df = joined_df.select(
*merged_index_scols,
*(
scol_for(this_sdf, this_internal.spark_column_name_for(label))
for label in this_internal.column_labels
),
*(
scol_for(that_sdf, that_internal.spark_column_name_for(label))
for label in that_internal.column_labels
),
*order_column
)
index_spark_columns = [scol_for(joined_df, col) for col in index_column_names]
index_columns = set(index_column_names)
new_data_columns = [
col
for col in joined_df.columns
if col not in index_columns and col != NATURAL_ORDER_COLUMN_NAME
]
schema = joined_df.select(*index_spark_columns, *new_data_columns).schema
index_fields = [
InternalField.from_struct_field(struct_field, use_extension_dtypes=use_extension_dtypes)
for struct_field, use_extension_dtypes in zip(
schema.fields[: len(index_spark_columns)], index_use_extension_dtypes
)
]
data_fields = [
InternalField.from_struct_field(
struct_field, use_extension_dtypes=field.is_extension_dtype
)
for struct_field, field in zip(
schema.fields[len(index_spark_columns) :],
this_internal.data_fields + that_internal.data_fields,
)
]
level = max(this_internal.column_labels_level, that_internal.column_labels_level)
def fill_label(label: Optional[Label]) -> List:
if label is None:
return ([""] * (level - 1)) + [None]
else:
return ([""] * (level - len(label))) + list(label)
column_labels = [
tuple(["this"] + fill_label(label)) for label in this_internal.column_labels
] + [tuple(["that"] + fill_label(label)) for label in that_internal.column_labels]
column_label_names = (
cast(List[Optional[Label]], [None]) * (1 + level - this_internal.column_labels_level)
) + this_internal.column_label_names
return DataFrame(
InternalFrame(
spark_frame=joined_df,
index_spark_columns=index_spark_columns,
index_names=this_internal.index_names,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[scol_for(joined_df, col) for col in new_data_columns],
data_fields=data_fields,
column_label_names=column_label_names,
)
)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
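# A minimal usage sketch for `combine_frames` (illustrative only): it requires
# the 'compute.ops_on_diff_frames' option and two frames with matching index
# names; columns of `this` come back under the 'this' prefix and columns of
# `that` under the 'that' prefix.
def _example_combine_frames_usage():
    from pyspark.pandas.config import set_option, reset_option
    set_option("compute.ops_on_diff_frames", True)
    try:
        psdf1 = ps.DataFrame({"a": [1, 2, 3]})
        psdf2 = ps.DataFrame({"b": [4, 5, 6]})
        return combine_frames(psdf1, psdf2)
    finally:
        reset_option("compute.ops_on_diff_frames")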
def align_diff_frames(
resolve_func: Callable[
["DataFrame", List[Label], List[Label]], Iterator[Tuple["Series", Label]]
],
this: "DataFrame",
that: "DataFrame",
fillna: bool = True,
how: str = "full",
preserve_order_column: bool = False,
) -> "DataFrame":
"""
This method aligns two different DataFrames with a given `func`. Columns are resolved and
handled within the given `func`.
To use this, `compute.ops_on_diff_frames` should be True, for now.
:param resolve_func: Takes aligned (joined) DataFrame, the column of the current DataFrame, and
the column of another DataFrame. It returns an iterable that produces Series.
>>> from pyspark.pandas.config import set_option, reset_option
>>>
>>> set_option("compute.ops_on_diff_frames", True)
>>>
>>> psdf1 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>> psdf2 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>>
>>> def func(psdf, this_column_labels, that_column_labels):
... psdf # conceptually this is A + B.
...
... # Within this function, Series from A or B can be performed against `psdf`.
... this_label = this_column_labels[0] # this is ('a',) from psdf1.
... that_label = that_column_labels[0] # this is ('a',) from psdf2.
... new_series = (psdf[this_label] - psdf[that_label]).rename(str(this_label))
...
... # This new series will be placed in new DataFrame.
... yield (new_series, this_label)
>>>
>>>
>>> align_diff_frames(func, psdf1, psdf2).sort_index()
a
0 0
1 0
2 0
3 0
4 0
5 0
6 0
7 0
8 0
>>> reset_option("compute.ops_on_diff_frames")
:param this: a DataFrame to align
:param that: another DataFrame to align
:param fillna: If True, it fills missing values in non-common columns in both `this` and `that`.
Otherwise, it returns as are.
:param how: join way. In addition, it affects how `resolve_func` resolves the column conflict.
- full: `resolve_func` should resolve only common columns from 'this' and 'that' DataFrames.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` and
'that_columns' in this function are B, C and B, C.
- left: `resolve_func` should resolve columns including that columns.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` is
B, C but `that_columns` are B, C, D.
- inner: Same as 'full' mode; however, internally performs inner join instead.
:return: Aligned DataFrame
"""
from pyspark.pandas.frame import DataFrame
assert how == "full" or how == "left" or how == "inner"
this_column_labels = this._internal.column_labels
that_column_labels = that._internal.column_labels
common_column_labels = set(this_column_labels).intersection(that_column_labels)
# 1. Perform the join given two dataframes.
combined = combine_frames(this, that, how=how, preserve_order_column=preserve_order_column)
# 2. Apply the given function to transform the columns in a batch and keep the new columns.
combined_column_labels = combined._internal.column_labels
that_columns_to_apply = [] # type: List[Label]
this_columns_to_apply = [] # type: List[Label]
additional_that_columns = [] # type: List[Label]
columns_to_keep = [] # type: List[Union[Series, Column]]
column_labels_to_keep = [] # type: List[Label]
for combined_label in combined_column_labels:
for common_label in common_column_labels:
if combined_label == tuple(["this", *common_label]):
this_columns_to_apply.append(combined_label)
break
elif combined_label == tuple(["that", *common_label]):
that_columns_to_apply.append(combined_label)
break
else:
if how == "left" and combined_label in [
tuple(["that", *label]) for label in that_column_labels
]:
# In this case, we will drop `that_columns` in `columns_to_keep` but passes
# it later to `func`. `func` should resolve it.
# Note that adding this into a separate list (`additional_that_columns`)
# is intentional so that `this_columns` and `that_columns` can be paired.
additional_that_columns.append(combined_label)
elif fillna:
columns_to_keep.append(SF.lit(None).cast(DoubleType()).alias(str(combined_label)))
column_labels_to_keep.append(combined_label)
else:
columns_to_keep.append(combined._psser_for(combined_label))
column_labels_to_keep.append(combined_label)
that_columns_to_apply += additional_that_columns
# Should extract columns to apply and do it in a batch in case
# it adds new columns for example.
if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0:
psser_set, column_labels_set = zip(
*resolve_func(combined, this_columns_to_apply, that_columns_to_apply)
)
columns_applied = list(psser_set) # type: List[Union[Series, Column]]
column_labels_applied = list(column_labels_set) # type: List[Label]
else:
columns_applied = []
column_labels_applied = []
applied = DataFrame(
combined._internal.with_new_columns(
columns_applied + columns_to_keep,
column_labels=column_labels_applied + column_labels_to_keep,
)
) # type: DataFrame
# 3. Restore the names back and deduplicate columns.
this_labels = OrderedDict()
# Add columns in an order of its original frame.
for this_label in this_column_labels:
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels and this_label == new_label[1:]:
this_labels[new_label[1:]] = new_label
# After that, we will add the rest columns.
other_labels = OrderedDict()
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels:
other_labels[new_label[1:]] = new_label
psdf = applied[list(this_labels.values()) + list(other_labels.values())]
psdf.columns = psdf.columns.droplevel()
return psdf
def is_testing() -> bool:
"""Indicates whether Spark is currently running tests."""
return "SPARK_TESTING" in os.environ
def default_session(conf: Optional[Dict[str, Any]] = None) -> SparkSession:
if conf is None:
conf = dict()
builder = SparkSession.builder.appName("pandas-on-Spark")
for key, value in conf.items():
builder = builder.config(key, value)
# Currently, pandas-on-Spark is dependent on such join due to 'compute.ops_on_diff_frames'
# configuration. This is needed with Spark 3.0+.
builder.config("spark.sql.analyzer.failAmbiguousSelfJoin", False)
if is_testing():
builder.config("spark.executor.allowSparkContext", False)
return builder.getOrCreate()
@contextmanager
def sql_conf(pairs: Dict[str, Any], *, spark: Optional[SparkSession] = None) -> Iterator[None]:
"""
    A convenient context manager that sets each Spark SQL configuration `key` to its
    `value` and then restores the previous value when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
if spark is None:
spark = default_session()
keys = pairs.keys()
new_values = pairs.values()
old_values = [spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
spark.conf.unset(key)
else:
spark.conf.set(key, old_value)
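# A small usage sketch for `sql_conf` (illustrative only): temporarily turn a
# Spark SQL configuration off for a block and let the context manager restore
# (or unset) the previous value afterwards.
def _example_sql_conf_usage():
    with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
        # Arrow-based conversion is disabled within this block ...
        pass
    # ... and the previous value is restored (or unset) on exit.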
def validate_arguments_and_invoke_function(
pobj: Union[pd.DataFrame, pd.Series],
pandas_on_spark_func: Callable,
pandas_func: Callable,
input_args: Dict,
) -> Any:
"""
Invokes a pandas function.
This is created because different versions of pandas support different parameters, and as a
result when we code against the latest version, our users might get a confusing
"got an unexpected keyword argument" error if they are using an older version of pandas.
This function validates all the arguments, removes the ones that are not supported if they
are simply the default value (i.e. most likely the user didn't explicitly specify it). It
throws a TypeError if the user explicitly specify an argument that is not supported by the
pandas version available.
For example usage, look at DataFrame.to_html().
:param pobj: the pandas DataFrame or Series to operate on
:param pandas_on_spark_func: pandas-on-Spark function, used to get default parameter values
:param pandas_func: pandas function, used to check whether pandas supports all the arguments
:param input_args: arguments to pass to the pandas function, often created by using locals().
Make sure locals() call is at the top of the function so it captures only
input parameters, rather than local variables.
:return: whatever pandas_func returns
"""
import inspect
# Makes a copy since whatever passed in is likely created by locals(), and we can't delete
# 'self' key from that.
args = input_args.copy()
del args["self"]
if "kwargs" in args:
# explode kwargs
kwargs = args["kwargs"]
del args["kwargs"]
args = {**args, **kwargs}
pandas_on_spark_params = inspect.signature(pandas_on_spark_func).parameters
pandas_params = inspect.signature(pandas_func).parameters
for param in pandas_on_spark_params.values():
if param.name not in pandas_params:
if args[param.name] == param.default:
del args[param.name]
else:
raise TypeError(
(
"The pandas version [%s] available does not support parameter '%s' "
+ "for function '%s'."
)
% (pd.__version__, param.name, pandas_func.__name__)
)
args["self"] = pobj
return pandas_func(**args)
@no_type_check
def lazy_property(fn: Callable[[Any], Any]) -> property:
"""
Decorator that makes a property lazy-evaluated.
Copied from https://stevenloria.com/lazy-properties/
"""
attr_name = "_lazy_" + fn.__name__
@property
@functools.wraps(fn)
def wrapped_lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
def deleter(self):
if hasattr(self, attr_name):
delattr(self, attr_name)
return wrapped_lazy_property.deleter(deleter)
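# A minimal usage sketch for `lazy_property` (illustrative only): the wrapped
# attribute is computed once on first access, cached on the instance under a
# '_lazy_' prefixed name, and can be invalidated again with `del obj.value`.
class _ExampleLazy(object):
    @lazy_property
    def value(self):
        # the body runs only on the first access; later accesses hit the cache
        return 42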
def scol_for(sdf: SparkDataFrame, column_name: str) -> Column:
"""Return Spark Column for the given column name."""
return sdf["`{}`".format(column_name)]
def column_labels_level(column_labels: List[Label]) -> int:
"""Return the level of the column index."""
if len(column_labels) == 0:
return 1
else:
levels = set(1 if label is None else len(label) for label in column_labels)
assert len(levels) == 1, levels
return list(levels)[0]
def name_like_string(name: Optional[Name]) -> str:
"""
Return the name-like strings from str or tuple of str
Examples
--------
>>> name = 'abc'
>>> name_like_string(name)
'abc'
>>> name = ('abc',)
>>> name_like_string(name)
'abc'
>>> name = ('a', 'b', 'c')
>>> name_like_string(name)
'(a, b, c)'
"""
if name is None:
label = ("__none__",) # type: Label
elif is_list_like(name):
label = tuple([str(n) for n in name])
else:
label = (str(name),)
return ("(%s)" % ", ".join(label)) if len(label) > 1 else label[0]
def is_name_like_tuple(value: Any, allow_none: bool = True, check_type: bool = False) -> bool:
"""
Check the given tuple is be able to be used as a name.
Examples
--------
>>> is_name_like_tuple(('abc',))
True
>>> is_name_like_tuple((1,))
True
>>> is_name_like_tuple(('abc', 1, None))
True
>>> is_name_like_tuple(('abc', 1, None), check_type=True)
True
>>> is_name_like_tuple((1.0j,))
True
>>> is_name_like_tuple(tuple())
False
>>> is_name_like_tuple((list('abc'),))
False
>>> is_name_like_tuple(('abc', 1, None), allow_none=False)
False
>>> is_name_like_tuple((1.0j,), check_type=True)
False
"""
if value is None:
return allow_none
elif not isinstance(value, tuple):
return False
elif len(value) == 0:
return False
elif not allow_none and any(v is None for v in value):
return False
elif any(is_list_like(v) or isinstance(v, slice) for v in value):
return False
elif check_type:
return all(
v is None or as_spark_type(type(v), raise_error=False) is not None for v in value
)
else:
return True
def is_name_like_value(
value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
"""
    Check whether the given value can be used as a name.
Examples
--------
>>> is_name_like_value('abc')
True
>>> is_name_like_value(1)
True
>>> is_name_like_value(None)
True
>>> is_name_like_value(('abc',))
True
>>> is_name_like_value(1.0j)
True
>>> is_name_like_value(list('abc'))
False
>>> is_name_like_value(None, allow_none=False)
False
>>> is_name_like_value(('abc',), allow_tuple=False)
False
>>> is_name_like_value(1.0j, check_type=True)
False
"""
if value is None:
return allow_none
elif isinstance(value, tuple):
return allow_tuple and is_name_like_tuple(
value, allow_none=allow_none, check_type=check_type
)
elif is_list_like(value) or isinstance(value, slice):
return False
elif check_type:
return as_spark_type(type(value), raise_error=False) is not None
else:
return True
def validate_axis(axis: Optional[Axis] = 0, none_axis: int = 0) -> int:
"""Check the given axis is valid."""
# convert to numeric axis
axis = cast(Dict[Optional[Axis], int], {None: none_axis, "index": 0, "columns": 1}).get(
axis, axis
)
if axis in (none_axis, 0, 1):
return cast(int, axis)
else:
raise ValueError("No axis named {0}".format(axis))
def validate_bool_kwarg(value: Any, arg_name: str) -> Optional[bool]:
"""Ensures that argument passed in arg_name is of type bool."""
if not (isinstance(value, bool) or value is None):
raise TypeError(
'For argument "{}" expected type bool, received '
"type {}.".format(arg_name, type(value).__name__)
)
return value
def validate_how(how: str) -> str:
"""Check the given how for join is valid."""
if how == "full":
warnings.warn(
"Warning: While pandas-on-Spark will accept 'full', you should use 'outer' "
+ "instead to be compatible with the pandas merge API",
UserWarning,
)
if how == "outer":
# 'outer' in pandas equals 'full' in Spark
how = "full"
if how not in ("inner", "left", "right", "full"):
raise ValueError(
"The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']",
)
return how
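# Behaviour sketch (illustrative): validate_how normalizes join types to the
# names Spark expects.
#
#     validate_how("outer")  # -> "full" (pandas' name mapped to Spark's)
#     validate_how("full")   # -> "full", but emits a UserWarning
#     validate_how("cross")  # -> raises ValueError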
@overload
def verify_temp_column_name(df: SparkDataFrame, column_name_or_label: str) -> str:
...
@overload
def verify_temp_column_name(df: "DataFrame", column_name_or_label: Name) -> Label:
...
def verify_temp_column_name(
df: Union["DataFrame", SparkDataFrame], column_name_or_label: Union[str, Name]
) -> Union[str, Label]:
"""
Verify that the given column name does not exist in the given pandas-on-Spark or
Spark DataFrame.
The temporary column names should start and end with `__`. In addition, `column_name_or_label`
expects a single string, or column labels when `df` is a pandas-on-Spark DataFrame.
>>> psdf = ps.DataFrame({("x", "a"): ['a', 'b', 'c']})
>>> psdf["__dummy__"] = 0
>>> psdf[("", "__dummy__")] = 1
>>> psdf # doctest: +NORMALIZE_WHITESPACE
x __dummy__
a __dummy__
0 a 0 1
1 b 0 1
2 c 0 1
>>> verify_temp_column_name(psdf, '__tmp__')
('__tmp__', '')
>>> verify_temp_column_name(psdf, ('', '__tmp__'))
('', '__tmp__')
>>> verify_temp_column_name(psdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `(__dummy__, )` ...
>>> verify_temp_column_name(psdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: ... `(, __dummy__)` ...
>>> verify_temp_column_name(psdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('dummy', '')
>>> verify_temp_column_name(psdf, ('', 'dummy'))
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('', 'dummy')
>>> internal = psdf._internal.resolved_copy
>>> sdf = internal.spark_frame
>>> sdf.select(internal.data_spark_columns).show() # doctest: +NORMALIZE_WHITESPACE
+------+---------+-------------+
|(x, a)|__dummy__|(, __dummy__)|
+------+---------+-------------+
| a| 0| 1|
| b| 0| 1|
| c| 0| 1|
+------+---------+-------------+
>>> verify_temp_column_name(sdf, '__tmp__')
'__tmp__'
>>> verify_temp_column_name(sdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `__dummy__` ... '(x, a)', '__dummy__', '(, __dummy__)', ...
>>> verify_temp_column_name(sdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: <class 'tuple'>
>>> verify_temp_column_name(sdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should start and end with `__`: dummy
"""
from pyspark.pandas.frame import DataFrame
if isinstance(df, DataFrame):
if isinstance(column_name_or_label, str):
column_name = column_name_or_label
level = df._internal.column_labels_level
column_name_or_label = tuple([column_name_or_label] + ([""] * (level - 1)))
else:
column_name = name_like_string(column_name_or_label)
assert any(len(label) > 0 for label in column_name_or_label) and all(
label == "" or (label.startswith("__") and label.endswith("__"))
for label in column_name_or_label
), "The temporary column name should be empty or start and end with `__`: {}".format(
column_name_or_label
)
assert all(
column_name_or_label != label for label in df._internal.column_labels
), "The given column name `{}` already exists in the pandas-on-Spark DataFrame: {}".format(
name_like_string(column_name_or_label), df.columns
)
df = df._internal.resolved_copy.spark_frame
else:
assert isinstance(column_name_or_label, str), type(column_name_or_label)
assert column_name_or_label.startswith("__") and column_name_or_label.endswith(
"__"
), "The temporary column name should start and end with `__`: {}".format(
column_name_or_label
)
column_name = column_name_or_label
assert isinstance(df, SparkDataFrame), type(df)
assert (
column_name not in df.columns
), "The given column name `{}` already exists in the Spark DataFrame: {}".format(
column_name, df.columns
)
return column_name_or_label
def spark_column_equals(left: Column, right: Column) -> bool:
"""
Check both `left` and `right` have the same expressions.
>>> spark_column_equals(SF.lit(0), SF.lit(0))
True
>>> spark_column_equals(SF.lit(0) + 1, SF.lit(0) + 1)
True
>>> spark_column_equals(SF.lit(0) + 1, SF.lit(0) + 2)
False
>>> sdf1 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf1["x"] + 1)
True
>>> sdf2 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf2["x"] + 1)
False
"""
return left._jc.equals(right._jc) # type: ignore
def compare_null_first(
left: Column,
right: Column,
comp: Callable[[Column, Column], Column],
) -> Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNull() & right.isNotNull()
)
def compare_null_last(
left: Column,
right: Column,
comp: Callable[[Column, Column], Column],
) -> Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNotNull() & right.isNull()
)
def compare_disallow_null(
left: Column,
right: Column,
comp: Callable[[Column, Column], Column],
) -> Column:
return left.isNotNull() & right.isNotNull() & comp(left, right)
def compare_allow_null(
left: Column,
right: Column,
comp: Callable[[Column, Column], Column],
) -> Column:
return left.isNull() | right.isNull() | comp(left, right)
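# Semantics sketch (inferred from the expressions above): these helpers wrap a
# column comparison `comp` with explicit NULL handling, e.g. for an ascending
# sort with comp = Column.__lt__:
#   compare_null_first    -> NULL compares "less than" any non-NULL value
#   compare_null_last     -> NULL compares "greater than" any non-NULL value
#   compare_disallow_null -> the comparison is False if either side is NULL
#   compare_allow_null    -> the comparison is True if either side is NULL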
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.utils
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.utils.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.utils tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.utils,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
alistairlow/tensorflow | tensorflow/python/estimator/canned/dnn_test.py | 31 | 16390 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNLogitFnTest(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(self,
dnn._dnn_logit_fn_builder)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
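# Note (illustrative): the helper above round-trips the parse_example output
# through a FIFOQueue via a QueueRunner, so each call of an input_fn dequeues
# the parsed feature tensors; this lets the same serialized examples feed the
# train, evaluate and predict flows below.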
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
| apache-2.0 |
mattilyra/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
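# Usage sketch (illustrative): the unnormalized Laplacian of a 3-node path
# graph is L = D - A, with D the degree matrix and A the adjacency matrix.
#
#     adjacency = np.array([[0., 1., 0.],
#                           [1., 0., 1.],
#                           [0., 1., 0.]])
#     lap, diag = graph_laplacian(adjacency, normed=False, return_diag=True)
#     # lap  == [[ 1., -1.,  0.],
#     #          [-1.,  2., -1.],
#     #          [ 0., -1.,  1.]]
#     # diag == [1., 2., 1.]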
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
sebalander/sebaPhD | calibrateRegularCameraIntr.py | 2 | 10632 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 5 16:30:53 2016
calibrates using fisheye distortion model (polynomial in theta)
help in
http://docs.opencv.org/ref/master/d9/d0c/group__calib3d.html#ga3207604e4b1a1758aa66acb6ed5aa65d&gsc.tab=0
@author: sebalander
"""
# %%
import cv2
import numpy as np
from numpy import zeros
import glob
import matplotlib.pyplot as plt
#from scipy import linalg
import poseCalibration as pc
from lmfit import minimize, Parameters
import poseRationalCalibration as rational
# %% ========== ========== RATIONAL PARAMETER HANDLING ========== ==========
def formatParametersChessIntrs(rVecs, tVecs, linearCoeffs, distCoeffs):
'''
set to vary all parameetrs
'''
params = Parameters()
for j in range(len(rVecs)):
for i in range(3):
params.add('rvec%d%d'%(j,i),
value=rVecs[j,i,0], vary=True)
params.add('tvec%d%d'%(j,i),
value=tVecs[j,i,0], vary=True)
params.add('fX', value=linearCoeffs[0,0], vary=True)
params.add('fY', value=linearCoeffs[1,1], vary=True)
params.add('cX', value=linearCoeffs[0,2], vary=True)
params.add('cY', value=linearCoeffs[1,2], vary=True)
# polynomial coeffs, grade 7
# # (k1,k2,p1,p2[,k3[,k4,k5,k6[,s1,s2,s3,s4[,τx,τy]]]])
#for i in [2,3,8,9,10,11,12,13]:
# params.add('distCoeffs%d'%i,
# value=distCoeffs[i,0], vary=False)
params.add('numDist0', value=distCoeffs[0,0], vary=True)
params.add('numDist1', value=distCoeffs[1,0], vary=True)
params.add('numDist2', value=distCoeffs[4,0], vary=True)
params.add('denomDist0', value=distCoeffs[5,0], vary=True)
params.add('denomDist1', value=distCoeffs[6,0], vary=True)
params.add('denomDist2', value=distCoeffs[7,0], vary=True)
return params
# %%
def retrieveParametersChess(params):
n = len([0 for x in params.iterkeys()])/6 - 3
rvec = zeros((n,3,1))
tvec = zeros((n,3,1))
for j in range(n):
for i in range(3):
rvec[j,i,0] = params['rvec%d%d'%(j,i)].value
tvec[j,i,0] = params['tvec%d%d'%(j,i)].value
cameraMatrix = zeros((3,3))
cameraMatrix[0,0] = params['fX'].value
cameraMatrix[1,1] = params['fY'].value
cameraMatrix[0,2] = params['cX'].value
cameraMatrix[1,2] = params['cY'].value
cameraMatrix[2,2] = 1
distCoeffs = zeros((14,1))
distCoeffs[0] = params['numDist0'].value
distCoeffs[1] = params['numDist1'].value
distCoeffs[4] = params['numDist2'].value
distCoeffs[5] = params['denomDist0'].value
distCoeffs[6] = params['denomDist1'].value
distCoeffs[7] = params['denomDist2'].value
return rvec, tvec, cameraMatrix, distCoeffs
# %% change state of paramters
def setDistortionParams(params, state):
for i in [0,1,4,5,6,7]:
params['distCoeffs%d'%i].vary=state
def setLinearParams(params, state):
params['cameraMatrix0'].value = state
params['cameraMatrix1'].value = state
params['cameraMatrix2'].value = state
params['cameraMatrix3'].value = state
def setExtrinsicParams(params, state):
n = len([0 for x in params.iterkeys()])/6 - 3
for j in range(n):
for i in range(3):
params['rvec%d%d'%(j,i)].vary = state
params['tvec%d%d'%(j,i)].vary = state
# %% residual
def residualDirectChessRatio(params, fiducialPoints, corners):
    '''
    Reprojection residual: 2D corner errors over all images, stacked for lmfit.
    '''
n = len(corners)
rVecs, tVecs, linearCoeffs, distCoeffs = retrieveParametersChess(params)
E = list()
for j in range(n):
projectedCorners = rational.direct(fiducialPoints,
rVecs[j],
tVecs[j],
linearCoeffs,
distCoeffs)
err = projectedCorners[:,0,:] - corners[j,:,0,:]
E.append(err)
return np.reshape(E,(n*len(fiducialPoints[0]),2))
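# Note (illustrative): the residual above stacks the 2D reprojection error of
# every corner in every image into an array of shape (n_images * n_corners, 2);
# lmfit flattens it and minimizes the sum of squares over all entries.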
# %%
def calibrateDirectChessRatio(fiducialPoints, corners, rVecs, tVecs, linearCoeffs, distCoeffs):
'''
    It seems that if the fit is not done in stages there is numerical instability.
    I see it with the planar camera, which in principle should have no trouble
    fitting, and to a much greater extent with the fisheye.
    It may be worth iterating this cycle as many times as needed until it
    converges. Let's hope it converges.
'''
params = formatParametersChessIntrs(rVecs, tVecs, linearCoeffs, distCoeffs) # generate Parameters obj
setDistortionParams(params,False)
setLinearParams(params,True)
setExtrinsicParams(params,True)
out = minimize(residualDirectChessRatio,
params,
args=(fiducialPoints,
corners),
xtol=1e-5, # Relative error in the approximate solution
ftol=1e-5, # Relative error in the desired sum of squares
maxfev=int(1e3))
'''
params = out.params
# setDistortionParams(params,False)
setLinearParams(params,True)
setExtrinsicParams(params,False)
out = minimize(residualDirectChessRatio,
params,
args=(fiducialPoints,
corners),
xtol=1e-5, # Relative error in the approximate solution
ftol=1e-5, # Relative error in the desired sum of squares
maxfev=int(1e3))
params = out.params
setDistortionParams(params,True)
setLinearParams(params,False)
# setExtrinsicParams(params,False)
out = minimize(residualDirectChessRatio,
params,
args=(fiducialPoints,
corners),
xtol=1e-5, # Relative error in the approximate solution
ftol=1e-5, # Relative error in the desired sum of squares
maxfev=int(1e3))
'''
return out
# %%
reload(pc)
# %% LOAD DATA
### fisheye data
imagesFolder = "./resources/fishChessboard/"
extension = "*.png"
cornersFile = "./resources/fishChessboard/fishCorners.npy"
patternFile = "./resources/chessPattern.npy"
imgShapeFile = "./resources/fishImgShape.npy"
distCoeffsFile = "./resources/fishDistCoeffs.npy"
linearCoeffsFile = "./resources/fishLinearCoeffs.npy"
rvecsFile = "./resources/fishChessboard/fishRvecs.npy"
tvecsFile = "./resources/fishChessboard/fishTvecs.npy"
### ptz data
#imagesFolder = "./resources/PTZchessboard/zoom 0.0/"
#extension = "*.jpg"
#cornersFile = "./resources/PTZchessboard/zoom 0.0/ptzCorners.npy"
#patternFile = "./resources/chessPattern.npy"
#imgShapeFile = "./resources/ptzImgShape.npy"
#
#distCoeffsFile = "./resources/PTZchessboard/zoom 0.0/ptzDistCoeffs.npy"
#linearCoeffsFile = "./resources/PTZchessboard/zoom 0.0/ptzLinearCoeffs.npy"
#rvecsFile = "./resources/PTZchessboard/zoom 0.0/ptzRvecs.npy"
#tvecsFile = "./resources/PTZchessboard/zoom 0.0/ptzTvecs.npy"
corners = np.load(cornersFile).transpose((0,2,1,3))
fiducialPoints = np.load(patternFile)
imgSize = np.load(imgShapeFile)
images = glob.glob(imagesFolder+extension)
distCoeffsTrue = np.load(distCoeffsFile)
linearCoeffsTrue = np.load(linearCoeffsFile)
rVecsTrue = np.load(rvecsFile)
tVecsTrue = np.load(tvecsFile)
# %% # %% from testHomography.py
## use real data
#f = 5e2 # proposal of f, can't be estimated from homography
#
#rVecs, tVecs, Hs = pc.estimateInitialPose(fiducialPoints, corners, f, imgSize)
#
#pc.plotHomographyToMatch(fiducialPoints, corners[1:3], f, imgSize, images[1:3])
#
#pc.plotForwardHomography(fiducialPoints, corners[1:3], f, imgSize, Hs[1:3], images[1:3])
#
#pc.plotBackwardHomography(fiducialPoints, corners[1:3], f, imgSize, Hs[1:3])
# %%
model= 'rational'
f = 1e3 # the order of magnitude matters, though not too much.
# somewhere between 1e2 and 1e3 seems to work?
# %% intrinsic parameters initial conditions
linearCoeffsIni = np.array([[f, 0, imgSize[1]/2], [0, f, imgSize[0]/2], [0, 0, 1]])
#distCoeffsIni = np.zeros((14, 1)) # despues hacer generico para todos los modelos
#k = 10 # factor en que escalear la distancia focal
#linearCoeffsIni = linearCoeffsTrue * [k,k,1]
distCoeffsIni = distCoeffsTrue
# %% extrinsic parameters initial conditions, from estimated homography
rVecsIni, tVecsIni, Hs = pc.estimateInitialPose(fiducialPoints, corners, linearCoeffsIni)
#rVecsIni = rVecsTrue
#tVecsIni = tVecsTrue
# %% from testposecalibration DIRECT GENERIC CALIBRATION
i=0
img = plt.imread(images[i])
imageCorners = corners[i]
rVec = rVecsIni[i]
tVec = tVecsIni[i]
linearCoeffs = linearCoeffsIni
distCoeffs = distCoeffsIni
# direct mapping with initial conditions
cornersProjected = pc.direct(fiducialPoints, rVec, tVec, linearCoeffs, distCoeffs, model)
# plot corners in image
pc.cornerComparison(img, imageCorners, cornersProjected)
# %%
# format parameters
initialParams = formatParametersChessIntrs(rVecsIni, tVecsIni, linearCoeffsIni, distCoeffsIni)
# test retrieving parameters
# n=10
# retrieveParametersChess(initialParams)
# %%
#E = residualDirectChessRatio(initialParams, fiducialPoints, corners)
out = calibrateDirectChessRatio(fiducialPoints, corners, rVecsIni, tVecsIni, linearCoeffsIni, distCoeffsIni)
out.nfev
out.message
out.lmdif_message
(out.residual**2).sum()
# %%
rVecsOpt, tVecsOpt, cameraMatrixOpt, distCoeffsOpt = retrieveParametersChess(out.params)
# %%cameraMatrix0
img = plt.imread(images[i])
imageCorners = corners[i]
rVec = rVecsOpt[i]
tVec = tVecsOpt[i]
linearCoeffs = cameraMatrixOpt
distCoeffs = distCoeffsOpt
# direct mapping with initial conditions
cornersProjected = pc.direct(fiducialPoints, rVec, tVec, linearCoeffs, distCoeffs, model)
# plot corners in image
pc.cornerComparison(img, imageCorners, cornersProjected)
# %% compare fits. 'True' values are what the chessboard calibration gives
distCoeffsTrue
distCoeffsOpt
pc.plotRationalDist(distCoeffsTrue,imgSize, linearCoeffsTrue)
pc.plotRationalDist(distCoeffsOpt,imgSize, cameraMatrixOpt)
linearCoeffsTrue
cameraMatrixOpt
rVecsTrue[i]
rVecsOpt[i]
tVecsTrue[i]
tVecsOpt[i]
np.linalg.norm(rVecsTrue[9])
np.linalg.norm(rVecsOpt[9])
# %% check why the sign is flipped in rVecs; what does it mean?
r1, r2 = rVecsTrue[1,:,0],rVecsOpt[1,:,0]
r1
r2
np.linalg.norm(r1), np.linalg.norm(r2)
distance = np.array([np.linalg.norm(rVecsTrue[j,:,0] - rVecsOpt[j,:,0]) for j in range(len(rVecsTrue))]) / np.pi / 2
# it is due to the periodicity in 2*pi
plt.figure()
plt.plot(distance)
'''
but that does not mean the camera points where it should. At least it agrees
with OpenCV, though.
'''
| bsd-3-clause |
dennisobrien/bokeh | examples/app/dash/main.py | 3 | 4227 | from collections import Counter
from math import pi
import numpy as np
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, DataTable, RangeTool, TableColumn
from bokeh.palettes import Spectral11
from bokeh.plotting import figure
from bokeh.transform import cumsum
from bokeh.sampledata.autompg2 import autompg2 as mpg
from bokeh.sampledata.stocks import AAPL
# Timeseries
dates = np.array(AAPL['date'], dtype=np.datetime64)
source = ColumnDataSource(data=dict(date=dates, close=AAPL['adj_close']))
p = figure(plot_height=110, tools="", toolbar_location=None, #name="line",
x_axis_type="datetime", x_range=(dates[1500], dates[2500]), sizing_mode="scale_width")
p.line('date', 'close', source=source, line_width=2, alpha=0.7)
p.yaxis.axis_label = 'Traffic'
p.background_fill_color="#f5f5f5"
p.grid.grid_line_color="white"
select = figure(plot_height=50, plot_width=800, y_range=p.y_range,
x_axis_type="datetime", y_axis_type=None,
tools="", toolbar_location=None, sizing_mode="scale_width")
range_tool = RangeTool(x_range=p.x_range)
range_tool.overlay.fill_color = "navy"
range_tool.overlay.fill_alpha = 0.2
select.line('date', 'close', source=source)
select.ygrid.grid_line_color = None
select.add_tools(range_tool)
select.toolbar.active_multi = range_tool
select.background_fill_color="#f5f5f5"
select.grid.grid_line_color="white"
select.x_range.range_padding = 0.01
layout = column(p, select, sizing_mode="scale_width", name="line")
curdoc().add_root(layout)
# Donut chart
x = Counter({ 'United States': 157, 'United Kingdom': 93, 'Japan': 89, 'China': 63,
'Germany': 44, 'India': 42, 'Italy': 40, 'Australia': 35, 'Brazil': 32,
'France': 31, 'Taiwan': 31 })
data = pd.DataFrame.from_dict(dict(x), orient='index').reset_index().rename(index=str, columns={0:'value', 'index':'country'})
data['angle'] = data['value']/sum(x.values()) * 2*pi
data['color'] = Spectral11
region = figure(plot_height=350, toolbar_location=None, outline_line_color=None, sizing_mode="scale_both", name="region", x_range=(-0.4, 1))
region.annular_wedge(x=-0, y=1, inner_radius=0.2, outer_radius=0.32,
start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
line_color="white", fill_color='color', legend='country', source=data)
region.axis.axis_label=None
region.axis.visible=False
region.grid.grid_line_color = None
region.legend.label_text_font_size = "0.7em"
region.legend.spacing = 1
region.legend.glyph_height = 15
region.legend.label_height = 15
curdoc().add_root(region)
# Bar chart
plats = ("IOS", "Android", "OSX", "Windows", "Other")
values = (35, 22, 13, 26, 4)
platform = figure(plot_height=350, toolbar_location=None, outline_line_color=None, sizing_mode="scale_both", name="platform",
y_range=list(reversed(plats)), x_axis_location="above")
platform.x_range.start = 0
platform.ygrid.grid_line_color = None
platform.axis.minor_tick_line_color = None
platform.outline_line_color = None
platform.hbar(left=0, right=values, y=plats, height=0.8)
curdoc().add_root(platform)
# Table
source = ColumnDataSource(data=mpg[:6])
columns = [
TableColumn(field="cyl", title="Counts"),
TableColumn(field="cty", title="Uniques"),
TableColumn(field="hwy", title="Rating"),
]
table = DataTable(source=source, columns=columns, height=210, width=330, name="table", sizing_mode="scale_both")
curdoc().add_root(table)
# Setup
curdoc().title = "Bokeh Dashboard"
curdoc().template_variables['stats_names'] = ['users', 'new_users', 'time', 'sessions', 'sales']
curdoc().template_variables['stats'] = {
'users' : {'icon': 'user', 'value': 11200, 'change': 4 , 'label': 'Total Users'},
'new_users' : {'icon': 'user', 'value': 350, 'change': 1.2 , 'label': 'New Users'},
'time' : {'icon': 'clock-o', 'value': 5.6, 'change': -2.3 , 'label': 'Total Time'},
'sessions' : {'icon': 'user', 'value': 27300, 'change': 0.5 , 'label': 'Total Sessions'},
'sales' : {'icon': 'dollar-sign', 'value': 8700, 'change': -0.2 , 'label': 'Average Sales'},
}
| bsd-3-clause |
julienr/vispy | vispy/ext/_bundled/mplexporter.py | 17 | 28160 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
Copyright (c) 2014, mpld3
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, # noqa
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this # noqa
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # noqa
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import warnings
import io
from . import mplutils as utils
import matplotlib
from matplotlib import transforms
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
        Parameters
        ----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
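    # Usage sketch (illustrative, with a hypothetical Renderer subclass):
    #
    #     renderer = MyRenderer()       # implements open_figure, draw_line, ...
    #     Exporter(renderer).run(fig)   # crawls `fig` and emits draw calls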
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
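    # Example (illustrative): for a line plotted with ax.plot, calling
    # process_transform(line.get_transform(), ax, line.get_xydata()) would
    # typically return ("data", xydata), i.e. the points are already expressed
    # in data coordinates; passing force_trans=ax.figure.transFigure instead
    # maps the points into figure coordinates first.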
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not impemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not impemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
##############################################################################
# Renderers/base.py
import itertools
from contextlib import contextmanager
import numpy as np
from . import _mpl_py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
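    # Note on pathcodes (per the SVG path grammar; illustrative): 'M', 'L' and
    # 'T' each consume one vertex from `data`, 'Q' and 'S' consume two (control
    # point + end point), 'C' consumes three, and 'Z' closes the path without
    # consuming any vertex. For example, a closed triangle could be passed as
    # data=[[0, 0], [1, 0], [0, 1]] with pathcodes=['M', 'L', 'L', 'Z'].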
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
| bsd-3-clause |
kavvkon/enlopy | tests/test_generate.py | 1 | 5438 | import numpy as np
import pandas as pd
from enlopy.utils import make_timeseries
from enlopy.generate import (add_noise, gen_daily_stoch_el, gen_load_from_daily_monthly, gen_load_sinus, gen_demand_response,
disag_upsample, clean_convert, countweekend_days_per_month,
gen_analytical_LDC, gen_load_from_LDC, gen_corr_arrays, gen_gauss_markov)
class Test_noise():
def test_ndarray_gauss(self):
a = np.random.rand(24)
b = np.random.rand(24) / 10
c = gen_gauss_markov(a, b, .9)
assert isinstance(c, np.ndarray)
def test_ndarray_add_noise_gauss(self):
a = np.random.rand(8760)
b = add_noise(a, 3, 0.05) # Gauss Markov noise
assert isinstance(b, pd.Series)
def test_2d_ndarray_add_noise_gauss(self):
a = np.random.rand(8760, 2)
b = add_noise(a, 3, 0.05) # Gauss Markov noise
assert isinstance(b, pd.DataFrame)
assert (8760,2) == b.shape
def test_ndarray_add_noise_normal(self):
a = np.random.rand(8760)
        b = add_noise(a, 1, 0.05) # normal noise
assert isinstance(b, pd.Series)
def test_2d_ndarray_add_noise_normal(self):
a = np.random.rand(8760, 2)
        b = add_noise(a, 1, 0.05) # normal noise
assert isinstance(b, pd.DataFrame)
assert (8760,2) == b.shape
def test_ndarray_add_noise_uniform(self):
a = np.random.rand(8760)
        b = add_noise(a, 2, 0.05) # uniform noise
assert isinstance(b, pd.Series)
def test_2d_ndarray_add_noise_uniform(self):
a = np.random.rand(8760, 2)
        b = add_noise(a, 2, 0.05) # uniform noise
assert isinstance(b, pd.DataFrame)
assert (8760,2) == b.shape
def test_add_noise_not_annual(self):
a = np.random.rand(15)
b = add_noise(a, 3, 0.05)
assert isinstance(b, pd.Series)
class Test_gen_monthly_daily():
def test_gen_monthly_daily(self):
Weight = .55 # Ratio between working and non-working day load (e.g. 70% - 30% )
ML = 1000 * np.ones(12) # monthly load
DWL = np.random.rand(24) * 10 # daily load working
DWL = DWL / DWL.sum() # normalized
DNWL = np.random.rand(24) * 5 # daily load non working
        DNWL = DNWL / DNWL.sum() # normalized
year = 2014
Load1 = gen_load_from_daily_monthly(ML, DWL, DNWL, Weight, year)
assert len(Load1) == 8760
assert np.isclose(Load1.sum(), np.sum(ML))
class Test_gen_dummy_load():
def test_gen_dummy(self):
a = gen_daily_stoch_el(1500)
assert isinstance(a, np.ndarray)
assert len(a) == 24
class Test_gen_load_sinus():
def test_gen_sinus(self):
Load1 = gen_load_sinus(1,2,3,4,5,6)
assert len(Load1) == 8760
class Test_disag():
def test_disag_daily_to_hourly(self):
x = np.arange(0, 365)
y = (np.cos(2 * np.pi / 364 * x) * 50 + 100)
y = make_timeseries(y, freq='d')
disag_profile = np.random.rand(24)
y_disag = disag_upsample(y, disag_profile)
assert np.isclose(np.sum(y_disag), np.sum(y)) # <= 0.001 #FIXME: np test equality
assert len(y_disag) == 8760
def test_disag_hourly_to_minutes(self):
x = np.arange(0, 8760)
y = (np.cos(2 * np.pi / 8759 * x) * 50 + 100)
y = make_timeseries(y, freq='h')
disag_profile = np.random.rand(60)
y_disag = disag_upsample(y, disag_profile, to_offset='t')
assert np.isclose(np.sum(y_disag), np.sum(y) ) # <= 0.001 #FIXME: np test equality
assert len(y_disag) == 8760*60
class Test_demand_side_management():
def test_load_shifting_small(self):
a = np.random.rand(8760) * 100
a = clean_convert(a, force_timed_index=True, always_df=False)
b = gen_demand_response(a,.1,.2)
assert np.isclose(np.sum(a), np.sum(b))
assert np.max(a) > np.max(b)
def test_load_shifting_big(self):
a = np.random.rand(8760) * 100
a = clean_convert(a, force_timed_index=True, always_df=False)
b = gen_demand_response(a,.15,.5)
assert np.isclose(np.sum(a), np.sum(b))
assert np.max(a) > np.max(b)
def test_countweekend_days_per_month():
a = make_timeseries(year=2015, length=8760, freq='h')
b = countweekend_days_per_month(a.resample('d').mean())
assert len(b) == 12
assert sum(b) == 104 #weekend days in 2015
def test_gen_analytical_LDC():
#Generate a simple LDC with Peak of 1
U = (1, 0.5, 0.2, 8760)
LDC = gen_analytical_LDC(U)
assert max(LDC[0]) == 1.0
assert min(LDC[0]) == 0.0
assert np.isclose(np.mean(LDC[0]), 0.5)
def test_gen_load_from_LDC():
# Only operate 90% of the time.
duration_fraction = 0.9
LDC = gen_analytical_LDC((1, 0.5, 0.2, 8760 * duration_fraction))
b = gen_load_from_LDC(LDC)
assert b.max() <= 1.0
# According to the defined formula anything below should be zero
val_perc = np.percentile(b, (1 - duration_fraction - 0.01) * 100)
assert np.isclose(val_perc, 0.0)
def test_gen_corr_arrays():
Na = 2
length = 1000
r = 0.85
M = np.array([[1, r],
[r, 1]])
A = gen_corr_arrays(Na, length, M)
new_r = np.corrcoef(A)[0][1]
assert A.shape == (Na, length)
#allow some tolerance of convergence..
assert np.abs(new_r - r) <= 0.03 | bsd-3-clause |
ksiomelo/cubix | spectralmix/SpectralCluster.py | 1 | 23910 | #!/usr/bin/env python
## A handler class that that takes input data and performed specified analyses.
# A number of possible models are available through this class
### make imports
import sys,os,re,time,cPickle
import numpy as np
from scipy.cluster.vq import kmeans2,kmeans
from scipy.cluster.vq import whiten
from scipy.spatial.distance import pdist,cdist,squareform
import scipy.stats as stats
import matplotlib.pyplot as plt
from ClusterBase import ClusterBase
from DistanceMatrix import DistanceMatrix
from GraphingLib import *
## use numpy
from numpy.linalg import eig
## use scipy and sparse matrices
try:
from scipy.sparse.linalg.eigen.arpack import eigen_symmetric, eigen
except: "WARNING: scipy.sparse.linalg is not available"
sys.path.append(os.path.join('.','MixModel'))
try:
from GraphingLib import *
except:
print "WARNING: GraphingLib not available -- is Pygraphviz installed?"
## turn off warning about casting complex numbers to real ones
import warnings
warnings.simplefilter("ignore", np.ComplexWarning)
### handle warnings
import warnings
warnings.simplefilter('error', UserWarning)
class SpectralCluster(ClusterBase):
'''
constructor
description
this class takes as input a raw matrix consisting of observations and features
    the observations occupy the rows and the features are the columns
    it also takes as input a distance matrix, an affinity matrix or a networkx graph
args
data - a raw matrix, a distance matrix, an affinity matrix or a networkx graph
k - the number of components in the mixture
labels - a list of the true labels if available
projID - the project id (that may include a path) to be used when saving results
dataHeader - are the names of the individual objects to cluster (i.e. [gene1,gene2])
dtype - specifies to the constructor which data type is being input
weighted - specifies whether or not a graph-type input is weighted
dataMax - this is the maximal value for a given data source with respect to the genome
refine - used to specify the method for noise refinement 'kmeans'
paramEstimator - 'distortion','silvalue' or 'fscore'
sigmaRange - range of sigmas to be searched
dmatPath - if raw data are given the dmat will be saved to increase compute speed
fileType - specify output file type -- types supported by matplotlib pdf, png, jpg
classifyStep - kmeans is the only current option
handleExtremeVals - if true extreme values in affinity matrix are mapped to a larger value
reweighting - true
algorithm:
        (1) create an affinity matrix using a_ij = exp( -d_ij^2 / (2*sigma^2) )
(2) define the matrix D and create the matrix L
(3) create the matrix X using the eigenvectors of L
(4) create the normalized matrix Y from X
(5) cluster the rows in Y using K-means
(6) assign the points to clusters
'''
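    # Sketch of the construction in run_spectral_clust_ng (after Ng, Jordan &
    # Weiss, 2001); the distance/affinity helpers live in ClusterBase, so the
    # exact details below are an assumption:
    #   A_ij = exp(-d_ij**2 / (2 * sigma**2))     # affinity from distances
    #   L    = D^{-1/2} A D^{-1/2}                # with D = diag(row sums of A)
    #   X    = the k largest eigenvectors of L, stacked as columns
    #   Y    = X rescaled to unit length (the code below rescales each column)
    # and the rows of Y are then clustered with k-means.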
def __init__(self,data,k=None,labels=None,dataHeader=None,projID="generic",dtype='raw',weighted=False,dataMax=None,penalty=False,
fileType='pdf',verbose=False,sigma=None,refine=None,paramEstimator='distortion',sigmaRange=None,dmatPath=None,
classifyStep='kmeans',handleExtremeVals=False,reweighting=True):
if verbose == True:
print "INFO: running spectral clustering............."
if data == None:
print "ERROR: Bad data given"
return None
## class-wide variables
self.data = data
self.k = k
self.sigHat=sigma
self.verbose = verbose
self.dtype = dtype
self.projID = projID
self.weighted = weighted
self.dataMax = dataMax
self.fileType = fileType
self.refine = refine
self.paramEstimator = paramEstimator
self.handleExtremeVals = handleExtremeVals
self.unusedGenes = []
self.unusedIndices = []
self.clustResults = None
self.penalty = penalty
self.dmatPath = dmatPath
self.noiseValue = 999
self.classifyStep = classifyStep
self.reweighting = reweighting
## handle header and labels
if dataHeader != None:
self.dataHeader = [dat for dat in dataHeader]
self.origDataHeader = [odat for odat in dataHeader]
else:
self.dataHeader = None
self.origDataHeader = None
if labels != None:
self.origLabels = np.array([float(l) for l in labels])
self.labels = np.array([float(l) for l in labels])
else:
self.labels = None
self.origLabels = None
## set the range of sigma
if sigma == None and sigmaRange != None:
if type(sigmaRange) == type(np.array([])):
self.sigmaRange = sigmaRange
else:
print "WARNING: Invalid input for sigmaRange must be np.array"
elif sigma == None:
self.sigmaRange = np.arange(0.01,0.6,0.001)
if sigma != None and sigmaRange != None:
print "WARNING: both sigma and sigmaRange specified -- ignoring sigmaRange"
## class-wide variables
if self.dtype == 'graph':
self.G = data
if self.dataHeader == None:
print "ERROR: header values must be input along with the graph"
else:
if type(data) != type(np.array([])):
return None
self.n, self.d = np.shape(data)
## error check gene list
if self.verbose == True:
print "\tINFO: error checking data..."
self._error_check_input_data()
## determine how to run algorithm
if self.dtype == 'affinity':
self.run_from_affinity_matrix()
elif self.sigHat == None:
self.run_by_estimating_sigma()
else:
self.run_with_given_sigma()
## save results s.t. they are easily accessible
if self.clustResults != None:
self.aMat = self.clustResults['aMat']
self.eigVals = self.clustResults['eigVals']
self.xMat = self.clustResults['xMat']
self.yMat = self.clustResults['yMat']
else:
"WARNING: the algorithm returned no results"
        ## perform evaluation (here we penalize for genes that are thrown out by calling them noise)
if self.clustResults == None:
print "ERROR: ClustResults returned None -skipping evaluation"
elif self.labels == None:
pass
elif self.penalty == True:
try:
self.evaluation = self.evaluate_cluster_assignments(self.origLabels,self.clustResults['labels'])
except:
print "ERROR: there was an error in the penalized evaluation"
else:
try:
self.evaluation = self.evaluate_cluster_assignments(self.labels,self.clustResults['labels'])
except:
print "ERROR: there was an error in the nonpenalized evaluation"
## add final distortion and sil values
if self.clustResults != None and self.clustResults['centroids'] != None:
self.distortion = self.calculate_distortion_measure(self.clustResults)
self.silVals = self.get_silhouette_values(self.clustResults['yMat'],labels=self.clustResults['labels'])
else:
self.distortion = 0
            self.silVals = 0
## perform inference
#self.perform_noise_inference()
####################################################################
#
# methods to run spectral clustering
#
####################################################################
## scan a region for sigma
def run_by_estimating_sigma(self):
## run the algorithm for a range of sigma values
if self.verbose == True:
print "\tfinding sigma..."
print "\tusing %s"%self.paramEstimator
if self.paramEstimator not in ['distortion','silvalue','fscore']:
print "ERROR: Input value for paramEstimator must be 'distortion', silvalue' or 'fscore'"
if self.paramEstimator == 'fscore' and self.labels == None:
print "ERROR: if estimator is 'fscore' labels must be known"
## prepare progress points
progressPercents = np.array([0,0.25,0.50,0.75,1.0])
progress = [int(round(i)) for i in progressPercents * len(self.sigmaRange)]
self.fscore = -1e08
self.distortion = 1e08
self.silValue = -1e-08
self.clustResults = None
for s in range(len(self.sigmaRange)):
sigma = self.sigmaRange[s]
clustResults = None
numTries = 0
while numTries < 10:
try:
clustResults = self.run_spectral_clust_ng(sigma)
except:
clustResults = None
if clustResults == None:
numTries += 1
else:
numTries = 100
## calculate value for sigma estimator
if clustResults == None:
continue
if self.paramEstimator == 'distortion':
distortion = self.calculate_distortion_measure(clustResults)
elif self.paramEstimator == 'silvalue':
silVals = self.get_silhouette_values(clustResults['yMat'],labels=clustResults['labels'])
elif self.paramEstimator == 'fscore':
if self.penalty == True:
evaluation = self.evaluate_cluster_assignments(self.origLabels,clustResults['labels'])
else:
evaluation = self.evaluate_cluster_assignments(self.labels,clustResults['labels'])
## report progress
if s in progress and self.verbose == True:
print progressPercents[progress.index(s)]*100, "%complete"
## error checking
if self.paramEstimator == 'distortion' and distortion < 0:
continue
## save best results
if self.paramEstimator == 'distortion' and distortion < self.distortion:
self.distortion = distortion
self.clustResults = clustResults
self.sigHat = sigma
elif self.paramEstimator == 'silvalue' and silVals.mean() > self.silValue:
self.silValue = silVals.mean()
self.clustResults = clustResults
self.sigHat = sigma
elif self.paramEstimator == 'fscore' and evaluation['f1score'] > self.fscore:
self.fscore = evaluation['f1score']
self.clustResults = clustResults
self.sigHat = sigma
def run_with_given_sigma(self):
## run the algorithm for the a given sigma value
if self.verbose == True:
print "\tINFO: running for specified sigma:", self.sigHat
self.clustResults = None
numTries = 0
while numTries < 10:
try:
self.clustResults = self.run_spectral_clust_ng(self.sigHat)
except:
self.clustResults = None
if self.clustResults == None:
numTries += 1
else:
numTries = 100
def run_from_affinity_matrix(self):
if self.verbose == True:
print "\tINFO:running from affinity matrix"
self.clustResults = self.run_spectral_clust_ng(self.sigHat)
##############################################
#
# main spectral clustering implimentation
#
##############################################
## the method that carries out the algorithm for spectral clustering as proposed by Andrew Ng (2001)
def run_spectral_clust_ng(self,sigma,plots=False):
dmatPickle = 'NotAFile'
## create a distance (similarity) matrix
if self.dtype=='raw':
self.dMat = self.raw_to_distance_mat(self.data)
if self.dtype=='graph':
if self.dmatPath != None and os.path.isfile(self.dmatPath) == False:
if self.verbose == True:
print '...............creating new dMat to be pickled...'
self.dMat = self.graph_to_distance_mat(self.G,self.dataHeader,weighted=self.weighted,reweighting=self.reweighting,verbose=self.verbose)
cPickle.dump(self.dMat,open(self.dmatPath,'w'))
elif self.dmatPath != None and os.path.isfile(self.dmatPath) == True:
if self.verbose == True:
print '...............using pickled dmat'
self.dMat = cPickle.load(open(self.dmatPath,'r'))
else:
self.dMat = self.graph_to_distance_mat(self.G,self.dataHeader,weighted=self.weighted,reweighting=self.reweighting,verbose=self.verbose)
if self.dtype == 'distance':
self.dMat = self.data
## handle the affinity matrix
if self.dtype == 'affinity':
aMat = self.data
else:
aMat = self.distance_to_affinity_mat(self.dMat,sigma,reshape=self.penalty)
## handle extreme values in affinity matrix
if self.handleExtremeVals == True:
aMat[np.where(aMat < 0.00001)] = 0.00001
## create the diagonal matrix D
diaMat = self.affinity_to_diagonal_mat(aMat)
## find the matrix L
result = np.dot(diaMat,aMat) # multiply A and D^{-1/2}
lMat = np.dot(result,diaMat) # multiply the above result times D^{-1/2}
# test to make sure lMat is finite and does not contain NaNs
testNan = np.where(np.isnan(lMat) == True)
testNanResult = [len(z) for z in testNan]
testFinite = np.where(np.isfinite(lMat) == False)
testFiniteResult = [len(z) for z in testFinite]
if np.array(testFiniteResult).sum() > 0 and self.verbose == True:
print "WARNING: failed finite check"
elif np.array(testNanResult).sum() > 0 and self.verbose == True:
print "WARNING: failed nan check"
## find the k largest eigenvectors of L
eigVals, eigVecs = eig(lMat)
#eigVals, eigVecs = eigen_symmetric(lMat, k=self.k) # use scipy for sparse matrices eigen eigen_symmetric
eigVecs = -1.0 * eigVecs
sortedEigInds = np.argsort(np.sum(abs(eigVecs),0))
xMat = eigVecs[:,sortedEigInds[-self.k:]]
## compute normalized matrix Y from X
n,k = np.shape(xMat)
yMat = np.zeros([n,k])
unitLengths = np.sum(xMat**2,axis=0)**(0.5)
for col in range(k):
yMat[:,col] = xMat[:,col] / unitLengths[col]
## cluster the rows of Y using Kmeans
tries = 0
iters = 0
minDistortion = 1e08
bestClusters = None
bestKmeanLabels = None
## use kmeans to cluster in eigen space
if self.classifyStep == 'kmeans':
tries = 0
while tries < 5:
try:
kmeanResults, kmeanLabels = kmeans2(yMat,self.k)
localDistortion = self.calculate_distortion_measure({'centroids':kmeanResults, 'labels':kmeanLabels, 'yMat':yMat})
tries = 100
except:
kmeanResults = None
tries += 1
if kmeanResults == None:
kmeanResults, localDistortion = kmeans(yMat,self.k,iter=25)
### get the labels
try:
if self.penalty == True:
n = len(self.origDataHeader)
else:
n = self.n
kmeanLabels = np.zeros((n),)
for o in range(n):
minDist = 1e08
lab = None
obs = yMat[o,:]
for c in range(self.k):
centroid = kmeanResults[c,:]
dist = np.linalg.norm(obs-centroid)
if dist < minDist:
minDist = dist
lab = c
if minDist == 1e08:
print 'error: issue when calculating labels in kmeans1 SpectralCluster.py'
kmeanLabels[o] = lab
except:
kmeanLabels = None
if kmeanResults == None or kmeanLabels == None:
# print 'ERROR: failed at eigenspace clustering step'
return None
else:
return {'centroids':kmeanResults,'labels':kmeanLabels,'aMat':aMat,'yMat':yMat,'xMat':xMat,'eigVals':eigVals}
else:
print "ERROR: additional classifyStep methods have not been implemented use kmeans"
sys.exit()
    ## permute the newLabels until the difference from true is minimized
def permute_labels(self,trueLabels,newLabels):
permutations = self.permute_list(range(self.k))
minDiff = np.sum(np.abs(trueLabels - newLabels))
for perm in permutations:
permLabels = np.array([perm[i] for i in newLabels])
absDiff = np.abs(trueLabels - permLabels)
diff = len(np.where(absDiff >= 1)[0])
if diff < minDiff:
newLabels = permLabels
minDiff = diff
return newLabels
def permute_list(self,lst):
sz = len(lst)
if sz <= 1:
return [lst]
return [p[:i]+[lst[0]]+p[i:] for i in xrange(sz) for p in self.permute_list(lst[1:])]
## evaluate clustering performance (assumes labels are permuted to best match)
def evaluate_cluster_assignments(self,trueLabels,newLabels):
trueLabels = [int(l) for l in trueLabels]
newLabels = [int(l) for l in newLabels]
if len(trueLabels) != len(newLabels):
print "INPUT ERROR: len of true and new labels must be the same",len(trueLabels),len(newLabels)
return None
posCalls,allPosCalls,allMadeCalls = 0,0,0
for i in range(len(trueLabels)):
for j in range(len(trueLabels)):
if j >= i:
continue
if trueLabels[i] == trueLabels[j]:
allPosCalls += 1
if newLabels[i] == newLabels[j]:
allMadeCalls += 1
if trueLabels[i] == trueLabels[j] and newLabels[i] == newLabels[j]:
posCalls += 1
#print "\t", posCalls, allPosCalls, allMadeCalls, newLabels
posCalls,allPosCalls,allMadeCalls = map(float,[posCalls,allPosCalls,allMadeCalls])
recall = posCalls/allPosCalls
precision = posCalls/allMadeCalls
f1score = 2.0 * (precision*recall) / (precision + recall)
return {'recall':recall,'precision':precision,'f1score':f1score}
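    ## Small worked example for the pairwise evaluation above (illustrative):
    ##   trueLabels = [0, 0, 1, 1], newLabels = [0, 0, 0, 1]
    ##   pairs sharing a true cluster: (0,1), (2,3)        -> allPosCalls  = 2
    ##   pairs sharing a new cluster:  (0,1), (0,2), (1,2) -> allMadeCalls = 3
    ##   pairs sharing both:           (0,1)               -> posCalls     = 1
    ##   recall = 1/2, precision = 1/3, f1score = 2*(1/3)*(1/2)/(1/3 + 1/2) = 0.4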
def make_plot(self,plotType,header=None,data=None,labels=None,weighted=False,name='',fileType='pdf',vizThreshold=None,viewNoise=False):
if plotType not in ['dMat','aMat','diaMat','lMat','xMat','yMat','scatter','graph','eigenspace']:
print "INPUT ERROR: plotType in valid must be 'dMat','aMat','diaMat','lMat','xMat','yMat','scatter','graph'"
if plotType != 'graph':
self.plt = plt
self.fig= self.plt.figure(figsize = (7,7))
self.ax=self.fig.add_subplot(111)
if plotType=='eigenspace':
self.plt.title(r"Points in eigenvector space - with $\mathbf{\mu}$ estimates")
## plot mean estimates
for k in range(self.k):
self.plt.plot(self.clustResults['centroids'][k,0],self.clustResults['centroids'][k,1],'kx',markersize=10.0,markeredgewidth=5.0)
## plot data by class
if self.k == 2:
self._plot_scatter_data(self.yMat,labels=labels)
elif self.k == 3:
self._plot_scatter_data(self.yMat,labels=labels,use3D=False) ## toggle experimental 3D mode here
else:
self._plot_scatter_data(self.yMat[:,[0,1]],labels=labels)
print "WARNING: plotting not possible for dimesions greater than 3 - using only first 2"
elif plotType == 'graph':
#self.my_cmap = get_cmap_blues()
plot_network_data(self.G,header, labels=labels,name="graph_"+name,layout="neato",
nameDict=None,dataMax=self.dataMax,weighted=weighted,
viewPlot=False,fileType=fileType,vizThreshold=vizThreshold)
elif plotType == 'scatter':
if data == None:
"ERROR: plot type 'scatter' must have data specified as input.. skipping"
else:
self._plot_scatter_data(data,labels=labels)
elif plotType == 'dMat':
self._generate_heatmap(self.dMat,labels=labels)
elif plotType == 'aMat':
self._generate_heatmap(self.aMat,labels=labels)
elif plotType == 'diaMat':
self._generate_heatmap(self.diaMat,labels=labels)
elif plotType == 'lMat':
self._generate_heatmap(self.lMat,labels=labels)
elif plotType == 'xMat':
self._generate_heatmap(self.xMat,labels=labels)
elif plotType == 'yMat':
self._generate_heatmap(self.yMat,labels=labels)
        ## return plt if available
if plotType != 'graph':
plt.savefig(self.projID+"_%s.%s"%(plotType,self.fileType))
return plt
def kmeans_inference(self):
kmeanData = {}
if self.clustResults == None:
return None
newLabels = self.clustResults['labels'].copy()
for k in range(self.k):
kmeanData[k] = self.yMat[np.where(self.clustResults['labels']==k)[0],:]
## get the euclidean distances from the centroids
criticalVals = {}
for k in range(self.k):
euclidDist = (kmeanData[k] - kmeanData[k].mean(axis=0))**2.0
euclidDist = np.sqrt(euclidDist.sum(axis=1))
pvals = np.array([np.nan]).repeat(euclidDist.size) #zeros(euclidDist.size,dtype='float')
for p in range(euclidDist.size):
point = euclidDist[p]
                ### perform a one-sided (upper-tail) hypothesis test on the distance from the centroid
if point < euclidDist.mean():
pvals[p] = 1.0
elif point > euclidDist.mean():
pvals[p] = 1.0 - stats.norm.cdf(point,loc=euclidDist.mean(), scale=euclidDist.std())
            if len(np.where(pvals < 0.05)[0]) > 0:
noiseInds = np.where(self.clustResults['labels']==k)[0][np.where(pvals<0.05)[0]]
newLabels[noiseInds] = self.noiseValue
return {'labels':newLabels,'criticalVals':criticalVals}
def _eigenvalue_plot(self):
plt.plot([range(len(self.eigVals))],[self.eigVals], marker='o',color='k',markersize=8.0)
plt.ylim([0,1.2])
plt.xlim([-0.5,len(self.eigVals) + 0.5])
return plt
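## Minimal usage sketch (illustrative only; the data and parameter values below
## are hypothetical):
##   X = np.random.rand(30, 4)   # 30 observations, 4 features
##   sc = SpectralCluster(X, k=3, dtype='raw', sigma=0.1, verbose=True)
##   print sc.clustResults['labels']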
| apache-2.0 |
ChileanVirtualObservatory/ASYDO | src/asydo/factory.py | 1 | 4388 | # Create several cubes in parallel, using random parameters
from multiprocessing import Pool
import multiprocessing
from .vu import *
from .db import *
import math
import random
import numpy as np
import sys
import os
import time
import matplotlib.pyplot as plt
import copy
import pickle
#SPEED_OF_LIGHT = 299792458.0
#KILO = 1000
class IMCConf:
def __init__(self,number,dbpath="ASYDO",mol_list="all",mol_prob=0.3,x_pos=0.0,y_pos=0.0,f_pos=300000,spa_pix=5,spe_pix=100,fov=500,bw=2000,rvel=(150,1000),temp=(50,500),semiaxis=(10,300),fwhm=(10,50),angle=(0,math.pi),rot=(50,500),curtosis=(-5,5)):
self.rvel=rvel
self.number=number
self.mol_prob=mol_prob
self.mol_list=mol_list
self.dbpath=dbpath
self.x_pos=x_pos
self.y_pos=y_pos
self.f_pos=f_pos
self.spa_pix=spa_pix
self.spe_pix=spe_pix
self.bw=bw
self.fov=fov
self.temp=temp
self.semiaxis=semiaxis
self.fwhm=fwhm
self.rot=rot
self.angle=angle
self.curtosis=curtosis
self.force_list=list()
self.ban_list=list()
def set_params(self,template):
self.rvel =template.rvel
self.mol_prob=template.mol_prob
self.mol_list=template.mol_list
self.dbpath =template.dbpath
self.x_pos =template.x_pos
self.y_pos =template.y_pos
self.f_pos =template.f_pos
self.spa_pix =template.spa_pix
self.spe_pix =template.spe_pix
self.bw =template.bw
self.fov =template.fov
self.temp =template.temp
self.semiaxis=template.semiaxis
self.fwhm =template.fwhm
self.rot =template.rot
self.angle =template.angle
self.curtosis=template.curtosis
self.force_list=copy.deepcopy(template.force_list)
self.ban_list =copy.deepcopy(template.ban_list)
def unitary_IMC_cube(conf):
def rget(val):
if isinstance(val,tuple):
return random.uniform(val[0],val[1])
else:
return val
print("Generating cube", conf.number)
dba=db.lineDB(conf.dbpath)
dba.connect()
try:
os.mkdir("logs")
except OSError:
pass
log=open('logs/exp-c'+str(conf.number)+'.log', 'w')
univ=Universe(log)
xpos=rget(conf.x_pos)
ypos=rget(conf.y_pos)
univ.create_source('AutoGenCube-'+str(conf.number),xpos,ypos)
fpos=rget(conf.f_pos)
bw=rget(conf.bw)
rv=rget(conf.rvel)
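    # The factor sqrt((1 + v/c)/(1 - v/c)) applied below is the relativistic
    # Doppler factor for a source with radial velocity rv (in km/s, hence the
    # KILO conversion); it maps the observed band edge to the rest-frame
    # frequency range used to query the line database.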
lf=(fpos - bw/2.0)*math.sqrt((1 + rv*KILO/SPEED_OF_LIGHT)/(1 - rv*KILO/SPEED_OF_LIGHT))
uf=(fpos + bw/2.0)
if (conf.mol_list=="all"):
mlist=dba.getMoleculeList(lf,uf)
chList=list()
for mol in mlist:
chList.append(mol[0])
else:
chList=conf.mol_list
temp=rget(conf.temp)
#print chList
# HERE Random selection of molecules
for chName in chList:
if chName in conf.ban_list:
log.write("Mol: "+chName+" banned!")
continue
if not (chName in conf.force_list):
if random.random() > conf.mol_prob:
continue
molist=dba.getSpeciesList(chName,lf,uf)
s_x=rget(conf.semiaxis)
s_y=rget(conf.semiaxis)
angle=rget(conf.angle)
rot=rget(conf.rot)
fw=rget(conf.fwhm)
curt=rget(conf.curtosis)
mms=""
for mol in molist:
if mms!="":
mms+=","
mms+=str(mol[0])
model=IMCM(log,conf.dbpath,mms,temp,('normal',s_x,s_y,angle),('skew',fw,curt),('linear',angle,rot))
model.set_radial_velocity(rv)
univ.add_component('AutoGenCube-'+str(conf.number),model)
fov=rget(conf.fov)
bw=rget(conf.bw)
cube=univ.gen_cube('AutoGenCube-'+str(conf.number),xpos,ypos,fpos,fov/conf.spa_pix,fov,bw/conf.spe_pix,bw)
dba.disconnect()
log.close()
return cube
def unitary_IMC(conf):
try:
cube=unitary_IMC_cube(conf)
mstring=pickle.dumps(cube)
return mstring
except Exception as exp:
print(str(conf.number)+": ")
print(exp)
return exp
def gen_IMC_cubes(confs):
nproc=multiprocessing.cpu_count()
print("### Generating "+str(len(confs))+" cubes using "+str(nproc)+" processors ###")
p = Pool(nproc)
result=p.map(unitary_IMC,confs)
ret=list()
for ms in result:
ret.append(pickle.loads(ms))
return ret
#import cProfile
#import re
#cProfile.run('unitaryGen(0)')
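# Illustrative usage sketch (hypothetical parameters; dbpath must point to a
# local ASYDO line database):
# if __name__ == '__main__':
#     confs = [IMCConf(i, dbpath="ASYDO", mol_prob=0.3) for i in range(4)]
#     cubes = gen_IMC_cubes(confs)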
| gpl-2.0 |
ZenDevelopmentSystems/scikit-learn | sklearn/decomposition/dict_learning.py | 104 | 44632 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=False)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary' * X
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
# Transposing product to ensure Fortran ordering
gram = np.dot(dictionary, dictionary.T).T
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
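# Illustrative sketch (not part of scikit-learn itself): sparse_encode finds a
# `code` matrix such that X is approximately np.dot(code, dictionary). With
# hypothetical shapes and random, row-normalized atoms:
#     X = np.random.randn(10, 8)
#     D = np.random.randn(5, 8)
#     D /= np.linalg.norm(D, axis=1)[:, np.newaxis]
#     code = sparse_encode(X, D, algorithm='lasso_lars', alpha=0.1)
#     # code.shape == (10, 5); np.dot(code, D) approximates X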
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
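# Minimal usage sketch (illustrative; data and sizes are hypothetical):
#     X = np.random.randn(20, 10)
#     code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.)
#     # code.shape == (20, 5), dictionary.shape == (5, 10), and errors holds
#     # the value of the objective at each iteration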
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
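# Illustrative warm-restart sketch (hypothetical data and sizes): the inner
# statistics (A, B) returned with return_inner_stats=True can be passed back,
# together with dict_init and iter_offset, to continue learning on new data:
#     V, (A, B) = dict_learning_online(X, n_components=5, alpha=1., n_iter=10,
#                                      return_inner_stats=True)
#     V = dict_learning_online(X_new, n_components=5, alpha=1., n_iter=10,
#                              dict_init=V, inner_stats=(A, B), iter_offset=10,
#                              return_code=False)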
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
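# A minimal, illustrative usage sketch (editor's addition, not part of the
# original estimator): sparse-code a few random samples against a random
# dictionary with unit-norm rows. All names and sizes below are hypothetical.
def _sparse_coder_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    D = rng.randn(15, 64)
    D /= np.linalg.norm(D, axis=1)[:, np.newaxis]  # rows must have unit norm
    X_demo = rng.randn(10, 64)
    coder = SparseCoder(dictionary=D, transform_algorithm='omp',
                        transform_n_nonzero_coefs=3)
    code = coder.transform(X_demo)  # shape (10, 15); at most 3 nonzeros per row
    return code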
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
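# A minimal, illustrative usage sketch (editor's addition, not from the
# original source): learn a small dictionary from random data and sparse-code
# the same data. The sizes and parameter values below are hypothetical.
def _dictionary_learning_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.randn(30, 20)
    dico = DictionaryLearning(n_components=8, alpha=1., max_iter=50,
                              transform_algorithm='lasso_lars',
                              random_state=0)
    code = dico.fit(X_demo).transform(X_demo)  # shape (30, 8)
    # X_demo is then approximated by code.dot(dico.components_)
    return code, dico.components_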
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
            performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
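# A minimal, illustrative streaming sketch (editor's addition, not from the
# original source): feed hypothetical mini-batches through partial_fit, which
# reuses the internally tracked inner_stats_ and iter_offset_ between calls.
def _minibatch_partial_fit_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    dico = MiniBatchDictionaryLearning(n_components=6, alpha=1., n_iter=10,
                                       batch_size=5, random_state=0)
    for _ in range(4):               # four hypothetical mini-batches
        X_batch = rng.randn(5, 12)
        dico.partial_fit(X_batch)    # updates components_ in place
    return dico.components_          # shape (6, 12)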
| bsd-3-clause |
h2educ/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
    Y[0] = 0.  # make the mean of Y zero
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
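# Editor's illustrative sketch (not part of the original tests): the false
# discovery rate computed above is simply FP / (TP + FP). With 1 false
# positive and 4 true positives among 5 selected features, FDR = 1/5 = 0.2,
# which should stay below alpha on average under Benjamini-Hochberg.
def _fdr_arithmetic_sketch(num_false_positives=1, num_true_positives=4):
    return num_false_positives / (num_true_positives + num_false_positives)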
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
hrjn/scikit-learn | examples/model_selection/grid_search_digits.py | 33 | 2764 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset into two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
harisbal/pandas | pandas/tests/indexes/timedeltas/test_scalar_compat.py | 1 | 2421 | # -*- coding: utf-8 -*-
"""
Tests for TimedeltaIndex methods behaving like their Timedelta counterparts
"""
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range
class TestVectorizedTimedelta(object):
def test_tdi_total_seconds(self):
# GH#10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
ser = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with nat
ser[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with both nat
ser = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(ser.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_tdi_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00')])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG
with tm.assert_raises_regex(ValueError, msg):
td.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
with tm.assert_raises_regex(ValueError, msg):
td.round(freq='M')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='M')
| bsd-3-clause |
pme1123/pyroots | pyroots/geometry_filters.py | 1 | 18515 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 11 09:46:19 2016
@author: pme
Contents:
Various functions for removing candidate objects based on their geometry.
- _percentile_filter: Supports diameter filter
- diameter_filter: Based on diameter along the medial axis
- length_width_filter: Based on length and diameter along the medial axis.
    - morphology_filter: Based on properties of convex hulls and equivalent ellipses
- hollow_filter: Based on medial axis lengths of original and filled objects
"""
from scipy import ndimage
import pandas as pd
import numpy as np
from skimage import morphology, measure
from pyroots.skeletonization import _axis_length
#########################################################################################################################
#########################################################################################################################
####### ########
####### Diameter Filter, Percentile Filter ########
####### ########
#########################################################################################################################
#########################################################################################################################
def _percentile_filter(labels, diameter_image, percentile, value, test_type):
"""
Function to classify (boolean) image objects based the distribution of pixel
values in the object. Acts as either a maximum or minimum filter.
Parameters
----------
labels : array
the output of ``ndimage.label``, which labels image objects in binary
images. For skeletons, use maximum distance rather than manhattan.
diameter_image : array
skeleton image with pixel values as diameter and background as 0, for
example.
percentile : float
the percentile of pixel values at which to make the decision to keep
value : float
the threshold at which the classification of an image object switches
test_type : str
Do the percentiles and values indicate minimum thresholds, or
maximum thresholds? Options are "ceiling" or "floor".
Returns
-------
A binary array
See Also
--------
``numpy.percentile``, ``pyroots.diameter_filter``
"""
# calculate percentile diameter for each object
out = []
for i in range(labels.max()):
temp = np.ma.masked_array(diameter_image, labels != i+1) # 0 is background
temp = np.percentile(temp.compressed(), percentile)
out.append(temp)
#select objects that meet diameter criteria at percentile
# i=0 is background, therefore i+1
if test_type == "ceiling":
out = [i+1 for i, x in enumerate(out) if x < value]
elif test_type == "floor":
out = [i+1 for i, x in enumerate(out) if x > value]
else:
print("Test_type should be 'ceiling' or 'floor'!")
#translate this to the labels image
out = np.in1d(labels, out) # is each pixel value represented in the
# list of objects to keep?
out = np.reshape(out, labels.shape) # reshape to image
return(out)
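# Editor's illustrative sketch (not part of the original module): classify two
# hypothetical labeled objects by their 90th-percentile pixel diameter,
# keeping only objects whose value at that percentile stays below 4 pixels.
def _percentile_filter_sketch():
    labels = np.array([[1, 1, 0],
                       [0, 2, 2]])
    diameters = np.array([[2., 3., 0.],
                          [0., 5., 6.]])
    keep = _percentile_filter(labels, diameters, percentile=90, value=4,
                              test_type='ceiling')
    return keep  # True for the pixels of object 1 (thin), False elsewhere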
def diameter_filter(skeleton_dictionary,
max_diameter=1000, min_diameter=-1,
max_percentile=100, min_percentile=None, pixel_level = False):
"""
Remove objects based on width thresholds. For example, hyphae usually are
< 5um diameter, so objects that are mostly >5um are not hyphae, and
    'objects' that are composites of debris and hyphae will have parts that are
    > 5um. This function removes both. It can also apply a floor to the
    diameter. Requires ``scipy``.
Parameters
----------
skeleton_dictionary : dict
Standard dictionary of objects returned from ``pyroots.skeleton_with_distance``.
Items are:
* "objects", a binary image of objects
* "length", an ndarray of medial axis, with each pixel representing length
* "diameter", an ndarray of medial axis, with each pixel representing diameter
* "geometry", a pandas ``DataFrame`` of total length and average diameter for
each object.
max_percentile : float
        Of all of the skeleton pixels for a single object, if the diameter at this
        percentile exceeds ``max_diameter``, the entire object is deleted. Feeds into
``numpy.percentile``. Default=100 (effectively no filter).
max_diameter : float
Cutoff, where true objects have a narrower (exclusive) diameter. In pixels.
Default=1000 (effectively no filter).
min_percentile : float
        Of all of the skeleton pixels for a single object, if the diameter at this
        percentile falls below ``min_diameter``, the entire object is deleted. Feeds into
``numpy.percentile``. Default=``None``.
min_diameter : float
Cutoff, where true objects have a wider (exclusive) diameter. In pixels.
Default=-1 (for no filtering).
pixel_level : bool
If true, will remove individual pixels with values > ''max_diameter'' and
< ''min_diameter''.
Returns
-------
A list of four objects:
* An updated list of image objects
* A filtered skeleton array of pixel diameter
* A filtered skeleton array of pixel length
* A pandas dataframe of updated mean diameter and total length (in
pixels) of each object.
See Also
--------
``pyroots._percentile_filter``, ``scipy.ndimage.label``, ``pandas``
"""
diameter_in = skeleton_dictionary["diameter"]
length_in = skeleton_dictionary["length"]
objects_in = skeleton_dictionary["objects"]
# make sure skeletons for diameter and length are updated with objects
diameter = diameter_in * (objects_in > 0)
length = length_in * (objects_in > 0)
#Label objects for indexing
labels, labels_ls= ndimage.label(diameter > 0, # convert float to boolean
structure = np.ones((3,3))) #square for skeletons
#### Percentile filters ####
max_perc_filter = _percentile_filter(labels, diameter,
max_percentile, max_diameter,
'ceiling')
if min_percentile is not None:
min_perc_filter = _percentile_filter(labels, diameter,
min_percentile, min_diameter,
'floor')
perc_filter = max_perc_filter * min_perc_filter
else:
perc_filter = max_perc_filter
#### Max, min diameter filter ####
if pixel_level is True:
max_diam_filter = diameter < max_diameter
min_diam_filter = diameter > min_diameter
diam_filter = max_diam_filter * min_diam_filter * perc_filter
else:
diam_filter = perc_filter
#### Update the skeletons ####
new_diam_skeleton = diameter * diam_filter
new_len_skeleton = length * diam_filter
new_labels = labels * diam_filter
#### Update geometry dataframe ####
# Re-calculate geometry of each object
objects_diameter = ndimage.mean(new_diam_skeleton,
new_labels,
index=range(new_labels.max()+1))
objects_length = ndimage.sum(new_len_skeleton,
new_labels,
index=range(new_labels.max()+1))
# Create a new geometry dataframe
geom_out = pd.DataFrame({'Length' : objects_length,
'Diameter' : objects_diameter})
geom_out = geom_out[geom_out['Diameter'].notnull()] # subset only present objects
#### update the objects ####
labels, labels_ls = ndimage.label(objects_in) # make labels or original objects
new_objects = np.in1d(labels, np.unique(new_labels)) # only keep labels that were kept
new_objects = np.reshape(new_objects, new_labels.shape) * labels > 0 # maintain as binary
out = {"objects" : new_objects,
"length" : new_len_skeleton,
"diameter" : new_diam_skeleton,
"geometry" : geom_out}
return(out)
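# Editor's illustrative usage sketch (not part of the original module): drop
# candidate objects whose 90th-percentile diameter exceeds 5 px, assuming
# `skeleton_dictionary` comes from ``pyroots.skeleton_with_distance``. The
# threshold values are hypothetical.
def _diameter_filter_sketch(skeleton_dictionary):
    filtered = diameter_filter(skeleton_dictionary,
                               max_diameter=5, max_percentile=90)
    return filtered["objects"], filtered["geometry"]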
#########################################################################################################################
#########################################################################################################################
####### ########
####### Length-Width Filter ########
####### ########
#########################################################################################################################
#########################################################################################################################
def length_width_filter(skeleton_dictionary, threshold=5):
"""
Remove objects based on length:(average) width ratios from skeletonized images.
Parameters
----------
skeleton_dictionary : dict
Standard dictionary of objects returned from ``pyroots.skeleton_with_distance``.
Items are:
* "objects", a binary image of objects
* "length", an ndarray of medial axis, with each pixel representing length
* "diameter", an ndarray of medial axis, with each pixel representing diameter
* "geometry", a pandas ``DataFrame`` of total length and average diameter for
each object.
threshold : float
Minimum length:width ratio to keep an object. Default = 5.
Returns
-------
A list containing two objects. 1) is a binary array with only objects that
have a large enough length:width threshold. 2) is the updated geometry dataframe.
"""
diameter_in = skeleton_dictionary["diameter"]
length_in = skeleton_dictionary["length"]
objects_in = skeleton_dictionary["objects"]
geometry_in = skeleton_dictionary["geometry"]
# convert pandas.DataFrame to list
geometry = [geometry_in['Diameter'].values, geometry_in['Length'].values]
labels, labels_ls = ndimage.label(skeleton_dictionary["objects"])
if labels_ls + 1 != len(geometry_in.index):
raise("Incompatible Geometry Array and Image: Image has " + str(labels_ls + 1) + " objects. Geometry DataFrame has " + str(len(geometry_in.index)) + " objects.")
# Calculate length:width ratios in the geom array and test whether they pass
ratio = geometry_in['Length'] / (geometry_in['Diameter']+0.000001)
thresh_test = ratio > threshold
# Update geometry dataframe
geom_out = geometry_in[thresh_test]
geom_out.loc[0] = np.array([0,0]) # re-insert empty space index.
geom_out = geom_out.sort_index()
# Update objects dataframe. Convert the labels to a boolean by determining whether
# the object number is true for thresh_test
new_objects = np.array(thresh_test)[labels]
new_diam_skeleton = diameter_in * new_objects
new_length_skeleton = length_in * new_objects
out = {"objects" : new_objects,
"length" : new_length_skeleton,
"diameter" : new_diam_skeleton,
"geometry" : geom_out}
return(out)
#########################################################################################################################
#########################################################################################################################
####### ########
####### Morphology Filter ########
####### ########
#########################################################################################################################
#########################################################################################################################
def morphology_filter(image, loose_eccentricity=0, loose_solidity=1,
strict_eccentricity=0, strict_solidity=1,
min_length=None, min_size=None):
"""
Removes objects based on properties of convex hulls and equivalent
ellipses, plus size. Defaults are for no filtering. This algorithm is
moderately fast, but time increases with number of objects due to the
need for loops.
Parameters
----------
image : 2D binary array.
Candidate objects
loose_eccentricity, loose_solidity : float
AND filters. Must pass both levels.
strict_eccentricity, strict_solidity : float
OR filters. Must pass one of these
min_length : int
in pixels, of ellipse with equivalent moments to convex hull
min_size : int
in pixels, of area of candidate object.
Returns
-------
2D binary array
See Also
--------
`skimage.measure.regionprops`, `ndimage.label`
"""
#### easy stuff first ####
# min size
if min_size is None:
working_image = image.copy()
else:
working_image = morphology.remove_small_objects(image, min_size=min_size)
# min length of major axis
labels = ndimage.label(working_image)[0]
props = measure.regionprops(labels)
if min_length is not None:
length = [0] + [i.major_axis_length for i in props]
length = np.array(length)[labels] # make an image based on labels
working_image = length > min_length
#### eccentricity and solidity ####
labels = ndimage.label(working_image)[0]
props = measure.regionprops(labels)
# calculate eccentricity
eccentricity = [0] + [i.eccentricity for i in props]
eccentricity = np.array(eccentricity)[labels] # make an image based on labels
# calculate solidity
solidity = [0] + [i.filled_area / i.convex_area for i in props]
solidity = np.array(solidity)[labels] # make an image based on labels
# loose and strict filters
loose = ((solidity < loose_solidity) * (solidity > 0)) * (eccentricity > loose_eccentricity) # AND
strict = ((solidity < strict_solidity) * (solidity > 0)) + (eccentricity > strict_eccentricity) # OR
# Combine and exit. Must pass all.
out = strict * loose * working_image # AND
return(out)
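# Editor's illustrative usage sketch (not part of the original module): keep
# elongated, non-solid candidates. As implemented above, an object survives
# only if it passes BOTH loose thresholds AND at least one strict threshold.
# All threshold values below are hypothetical.
def _morphology_filter_sketch(binary_image):
    return morphology_filter(binary_image,
                             loose_eccentricity=0.7, loose_solidity=0.9,
                             strict_eccentricity=0.95, strict_solidity=0.5,
                             min_length=20, min_size=50)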
#########################################################################################################################
#########################################################################################################################
####### ########
####### Hollow Filter ########
####### ########
#########################################################################################################################
#########################################################################################################################
def hollow_filter(image, ratio=1.5, fill_kernel=15, **kwargs):
"""
For each object, what is the ratio of A to B where:
A = medial axis length before filling (~= "perimeter" of hollow objects)
B = medial axis length after filling (= true medial of hollow objects)
Filters objects based on ratio, which is a ceiling for true objects. Assumes
true objects are not hollow.
This is a relatively slow algorithm, and should be performed last (time
proportional to number of objects due to loops).
Parameters
----------
image : 2D binary array
input image.
ratio : float
Maximum of A:B (see above)
fill_kernel : int
Radius of disk, in pixels, used to fill objects.
**kwargs : dict
passed on to `pyroots.noise_removal`
Returns
-------
A 2D binary array
See Also
--------
`skimage.morphology.binary_closing`, `pyroots.skeleton_with_distance`,
`pyroots.noise_removal`
"""
img = image.copy()
# Labels, object slices
labels, labels_ls = ndimage.label(img)
props = measure.regionprops(labels) # for slicing the image around objects
# kernel
kernel = morphology.disk(fill_kernel)
# Smooth the image. Medial axis is highly sensitive to bumps.
# skel = pr.noise_removal(img, **kwargs)
# skel = morphology.skeletonize(skel) # pull 'length' medial axis of all original objects
test = [0] * (labels_ls + 1)
    for i in range(1, labels_ls + 1):  # label 0 is background; skip it
# Bounds of slice to only the object of interest
a, b, c, d = props[i-1].bbox
a = max(a - fill_kernel, 0) # include a buffer. Stay within bounds of image.
b = max(b - fill_kernel, 0)
c = min(c + fill_kernel, img.shape[1])
d = min(d + fill_kernel, img.shape[0])
temp_object = labels[a:c, b:d] == i
# compute original medial axis length
open_medial = morphology.skeletonize(temp_object)
open_length = _axis_length(open_medial)[1] # length float only
#close object and compute new axis length
closed_medial = morphology.binary_closing(temp_object, selem=kernel)
closed_medial = morphology.skeletonize(closed_medial)
closed_length = _axis_length(closed_medial)[1]
# Does the ratio pass the threshold?
test[i] = open_length/closed_length < ratio
# update image
out = np.array(test)[labels] * labels > 0
return(out)
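# Editor's illustrative usage sketch (not part of the original module): remove
# objects whose unfilled medial axis is more than 1.5x as long as the medial
# axis after closing with a 15 px disk, i.e. probable hollow outlines rather
# than solid roots. The parameter values are hypothetical.
def _hollow_filter_sketch(binary_image):
    return hollow_filter(binary_image, ratio=1.5, fill_kernel=15)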
| apache-2.0 |
EttusResearch/gnuradio | gr-filter/examples/fir_filter_ccc.py | 47 | 4019 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
pombredanne/dask | dask/dataframe/multi.py | 2 | 21152 | """
Algorithms that Involve Multiple DataFrames
===========================================
The pandas operations ``concat``, ``join``, and ``merge`` combine multiple
DataFrames. This module contains analogous algorithms in the parallel case.
There are two important cases:
1. We combine along a partitioned index
2. We combine along an unpartitioned index or other column
In the first case we know which partitions of each dataframe interact with
which others. This lets us be significantly more clever and efficient.
In the second case each partition from one dataset interacts with all
partitions from the other. We handle this through a shuffle operation.
Partitioned Joins
-----------------
In the first case where we join along a partitioned index we proceed in the
following stages.
1. Align the partitions of all inputs to be the same. This involves a call
to ``dd.repartition`` which will split up and concat existing partitions as
necessary. After this step all inputs have partitions that align with
each other. This step is relatively cheap.
See the function ``align_partitions``.
2. Remove unnecessary partitions based on the type of join we perform (left,
right, inner, outer). We can do this at the partition level before any
computation happens. We'll do it again on each partition when we call the
in-memory function. See the function ``require``.
3. Embarrassingly parallel calls to ``pd.concat``, ``pd.join``, or
``pd.merge``. Now that the data is aligned and unnecessary blocks have
been removed we can rely on the fast in-memory Pandas join machinery to
execute joins per-partition. We know that all intersecting records exist
   within the same partition.
Hash Joins via Shuffle
----------------------
When we join along an unpartitioned index or along an arbitrary column any
partition from one input might interact with any partition in another. In
this case we perform a hash-join by shuffling data in each input by that
column. This results in new inputs with the same partition structure cleanly
separated along that column.
We proceed with hash joins in the following stages:
1. Shuffle each input on the specified column. See the function
``dask.dataframe.shuffle.shuffle``.
2. Perform embarrassingly parallel join across shuffled inputs.
"""
from __future__ import absolute_import, division, print_function
from bisect import bisect_left, bisect_right
from toolz import merge_sorted, unique, partial
import toolz
import numpy as np
import pandas as pd
from ..base import tokenize
from .core import (_get_return_type, _Frame, Scalar, DataFrame,
Index, _maybe_from_pandas)
from .io import from_pandas
from .shuffle import shuffle
from . import utils
def bound(seq, left, right):
""" Bound sorted list by left and right values
>>> bound([1, 3, 4, 5, 8, 10, 12], 4, 10)
[4, 5, 8, 10]
"""
return seq[bisect_left(seq, left): bisect_right(seq, right)]
def align_partitions(*dfs):
""" Mutually partition and align DataFrame blocks
This serves as precursor to multi-dataframe operations like join, concat,
or merge.
Parameters
----------
dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar
Sequence of dataframes to be aligned on their index
Returns
-------
dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar
These must have consistent divisions with each other
divisions: tuple
Full divisions sequence of the entire result
result: list
A list of lists of keys that show which data exist on which
divisions
"""
dfs1 = [df for df in dfs if isinstance(df, _Frame)]
    if len(dfs1) == 0:
raise ValueError("dfs contains no DataFrame and Series")
divisions = list(unique(merge_sorted(*[df.divisions for df in dfs1])))
dfs2 = [df.repartition(divisions, force=True)
if isinstance(df, _Frame) else df for df in dfs]
result = list()
inds = [0 for df in dfs]
for d in divisions[:-1]:
L = list()
for i, df in enumerate(dfs2):
if isinstance(df, _Frame):
j = inds[i]
divs = df.divisions
if j < len(divs) - 1 and divs[j] == d:
L.append((df._name, inds[i]))
inds[i] += 1
else:
L.append(None)
else: # Scalar has no divisions
L.append(None)
result.append(L)
return dfs2, tuple(divisions), result
def _maybe_align_partitions(args):
""" Align DataFrame blocks if divisions are different """
# passed to align_partitions
indexer, dasks = zip(*[x for x in enumerate(args)
if isinstance(x[1], (_Frame, Scalar))])
# to get current divisions
dfs = [df for df in dasks if isinstance(df, _Frame)]
if len(dfs) == 0:
# no need to align
return args
divisions = dfs[0].divisions
if not all(df.divisions == divisions for df in dfs):
dasks, _, _ = align_partitions(*dasks)
for i, d in zip(indexer, dasks):
args[i] = d
return args
def require(divisions, parts, required=None):
""" Clear out divisions where required components are not present
In left, right, or inner joins we exclude portions of the dataset if one
side or the other is not present. We can achieve this at the partition
    level as well.
>>> divisions = [1, 3, 5, 7, 9]
>>> parts = [(('a', 0), None),
... (('a', 1), ('b', 0)),
... (('a', 2), ('b', 1)),
... (None, ('b', 2))]
>>> divisions2, parts2 = require(divisions, parts, required=[0])
>>> divisions2
(1, 3, 5, 7)
>>> parts2 # doctest: +NORMALIZE_WHITESPACE
((('a', 0), None),
(('a', 1), ('b', 0)),
(('a', 2), ('b', 1)))
>>> divisions2, parts2 = require(divisions, parts, required=[1])
>>> divisions2
(3, 5, 7, 9)
>>> parts2 # doctest: +NORMALIZE_WHITESPACE
((('a', 1), ('b', 0)),
(('a', 2), ('b', 1)),
(None, ('b', 2)))
>>> divisions2, parts2 = require(divisions, parts, required=[0, 1])
>>> divisions2
(3, 5, 7)
>>> parts2 # doctest: +NORMALIZE_WHITESPACE
((('a', 1), ('b', 0)),
(('a', 2), ('b', 1)))
"""
if not required:
return divisions, parts
for i in required:
present = [j for j, p in enumerate(parts) if p[i] is not None]
divisions = tuple(divisions[min(present): max(present) + 2])
parts = tuple(parts[min(present): max(present) + 1])
return divisions, parts
required = {'left': [0], 'right': [1], 'inner': [0, 1], 'outer': []}
def join_indexed_dataframes(lhs, rhs, how='left', lsuffix='', rsuffix=''):
""" Join two partitiond dataframes along their index """
(lhs, rhs), divisions, parts = align_partitions(lhs, rhs)
divisions, parts = require(divisions, parts, required[how])
left_empty = lhs._empty_partition
right_empty = rhs._empty_partition
name = 'join-indexed-' + tokenize(lhs, rhs, how, lsuffix, rsuffix)
dsk = dict()
for i, (a, b) in enumerate(parts):
if a is None and how in ('right', 'outer'):
a = left_empty
if b is None and how in ('left', 'outer'):
b = right_empty
dsk[(name, i)] = (pd.DataFrame.join, a, b, None, how,
lsuffix, rsuffix)
# fake column names
j = left_empty.join(right_empty, None, how, lsuffix, rsuffix)
return DataFrame(toolz.merge(lhs.dask, rhs.dask, dsk), name,
j.columns, divisions)
def pdmerge(left, right, how, left_on, right_on,
left_index, right_index, suffixes,
default_left_columns, default_right_columns):
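    # Note: a thin wrapper around pd.merge; empty inputs are replaced with
    # empty DataFrames carrying the expected default columns so that every
    # per-partition merge keeps a consistent schema.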
if not len(left):
left = pd.DataFrame(columns=default_left_columns)
if not len(right):
right = pd.DataFrame(columns=default_right_columns)
result = pd.merge(left, right, how=how,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
suffixes=suffixes)
return result
def hash_join(lhs, left_on, rhs, right_on, how='inner',
npartitions=None, suffixes=('_x', '_y')):
""" Join two DataFrames on particular columns with hash join
This shuffles both datasets on the joined column and then performs an
    embarrassingly parallel join partition-by-partition.
>>> hash_join(a, 'id', rhs, 'id', how='left', npartitions=10) # doctest: +SKIP
"""
if npartitions is None:
npartitions = max(lhs.npartitions, rhs.npartitions)
lhs2 = shuffle(lhs, left_on, npartitions)
rhs2 = shuffle(rhs, right_on, npartitions)
if isinstance(left_on, Index):
left_on = None
left_index = True
else:
left_index = False
if isinstance(right_on, Index):
right_on = None
right_index = True
else:
right_index = False
# fake column names
left_empty = lhs._empty_partition
right_empty = rhs._empty_partition
j = pd.merge(left_empty, right_empty, how, None,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
suffixes=suffixes)
merger = partial(pdmerge, suffixes=suffixes,
default_left_columns=list(lhs.columns),
default_right_columns=list(rhs.columns))
if isinstance(left_on, list):
left_on = (list, tuple(left_on))
if isinstance(right_on, list):
right_on = (list, tuple(right_on))
token = tokenize(lhs, left_on, rhs, right_on, left_index, right_index,
how, npartitions, suffixes)
name = 'hash-join-' + token
dsk = dict(((name, i), (merger, (lhs2._name, i), (rhs2._name, i),
how, left_on, right_on,
left_index, right_index))
for i in range(npartitions))
divisions = [None] * (npartitions + 1)
return DataFrame(toolz.merge(lhs2.dask, rhs2.dask, dsk),
name, j.columns, divisions)
def _pdconcat(dfs, axis=0, join='outer'):
""" Concatenate caring empty Series """
# Concat with empty Series with axis=1 will not affect to the
# result. Special handling is needed in each partition
if axis == 1:
        # because dfs is a generator, convert it to a list first
dfs = list(dfs)
if join == 'outer':
            # outer concat should keep all empty Series
            # the input must include at least one non-empty object
            # because of the alignment
first = [df for df in dfs if len(df) > 0][0]
def _pad(base, fillby):
if isinstance(base, pd.Series) and len(base) == 0:
# use aligned index to keep index for outer concat
return pd.Series([np.nan] * len(fillby),
index=fillby.index, name=base.name)
else:
return base
dfs = [_pad(df, first) for df in dfs]
else:
# inner concat should result in empty if any input is empty
if any(len(df) == 0 for df in dfs):
dfs = [pd.DataFrame(columns=df.columns)
if isinstance(df, pd.DataFrame) else
pd.Series(name=df.name) for df in dfs]
return pd.concat(dfs, axis=axis, join=join)
def concat_indexed_dataframes(dfs, axis=0, join='outer'):
""" Concatenate indexed dataframes together along the index """
if join not in ('inner', 'outer'):
raise ValueError("'join' must be 'inner' or 'outer'")
if not all(isinstance(df, _Frame) for df in dfs):
raise ValueError("All inputs must be dd.DataFrame or dd.Series")
dfs2, divisions, parts = align_partitions(*dfs)
empties = [df._empty_partition for df in dfs]
result = pd.concat(empties, axis=axis, join=join)
if isinstance(result, pd.Series):
columns = result.name
else:
columns = result.columns.tolist()
parts2 = [[df if df is not None else empty
for df, empty in zip(part, empties)]
for part in parts]
name = 'concat-indexed-' + tokenize(join, *dfs)
dsk = dict(((name, i), (_pdconcat, part, axis, join))
for i, part in enumerate(parts2))
return_type = _get_return_type(dfs[0], columns)
return return_type(toolz.merge(dsk, *[df.dask for df in dfs2]),
name, columns, divisions)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, suffixes=('_x', '_y'),
npartitions=None):
if not on and not left_on and not right_on and not left_index and not right_index:
on = [c for c in left.columns if c in right.columns]
if not on:
left_index = right_index = True
if on and not left_on and not right_on:
left_on = right_on = on
on = None
if (isinstance(left, (pd.Series, pd.DataFrame)) and
isinstance(right, (pd.Series, pd.DataFrame))):
return pd.merge(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, suffixes=suffixes)
# Transform pandas objects into dask.dataframe objects
if isinstance(left, (pd.Series, pd.DataFrame)):
if right_index and left_on: # change to join on index
left = left.set_index(left[left_on])
left_on = False
left_index = True
left = from_pandas(left, npartitions=1) # turn into DataFrame
if isinstance(right, (pd.Series, pd.DataFrame)):
if left_index and right_on: # change to join on index
right = right.set_index(right[right_on])
right_on = False
right_index = True
right = from_pandas(right, npartitions=1) # turn into DataFrame
# Both sides are now dd.DataFrame or dd.Series objects
if left_index and right_index: # Do indexed join
return join_indexed_dataframes(left, right, how=how,
lsuffix=suffixes[0], rsuffix=suffixes[1])
else: # Do hash join
return hash_join(left, left.index if left_index else left_on,
right, right.index if right_index else right_on,
how, npartitions, suffixes)
def _concat_dfs(dfs, name, join='outer'):
""" Internal function to concat dask dict and DataFrame.columns """
dsk = dict()
i = 0
empties = [df._empty_partition for df in dfs]
result = pd.concat(empties, axis=0, join=join)
if isinstance(result, pd.Series):
columns = result.name
else:
columns = result.columns.tolist()
for df in dfs:
if columns != df.columns:
df = df[[c for c in columns if c in df.columns]]
dsk = toolz.merge(dsk, df.dask)
for key in df._keys():
dsk[(name, i)] = key
i += 1
return dsk, columns
def concat(dfs, axis=0, join='outer', interleave_partitions=False):
""" Concatenate DataFrames along rows.
- When axis=0 (default), concatenate DataFrames row-wise:
      - If all divisions are known and ordered, concatenate DataFrames keeping
        divisions. When divisions are not ordered, specifying
        interleave_partitions=True allows concatenating divisions one by one.
      - If any division is unknown, concatenate DataFrames, resetting the
        divisions to unknown (None)
- When axis=1, concatenate DataFrames column-wise:
- Allowed if all divisions are known.
      - If any division is unknown, a ValueError is raised.
Parameters
----------
dfs : list
List of dask.DataFrames to be concatenated
axis : {0, 1, 'index', 'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis
interleave_partitions : bool, default False
        Whether to concatenate DataFrames ignoring their order. If True,
        divisions are concatenated one by one.
Examples
--------
If all divisions are known and ordered, divisions are kept.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(6, 8, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 3, 6, 8, 10)>
Unable to concatenate if divisions are not ordered.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(2, 3, 6)>
>>> dd.concat([a, b]) # doctest: +SKIP
    ValueError: All inputs have known divisions which cannot be concatenated
    in order. Specify interleave_partitions=True to ignore order
Specify interleave_partitions=True to ignore the division order.
>>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 2, 3, 5, 6)>
    If any division is unknown, the resulting divisions will be unknown
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(None, None)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(1, 4, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
"""
if not isinstance(dfs, list):
dfs = [dfs]
if len(dfs) == 0:
raise ValueError('Input must be a list longer than 0')
if len(dfs) == 1:
return dfs[0]
if join not in ('inner', 'outer'):
raise ValueError("'join' must be 'inner' or 'outer'")
axis = DataFrame._validate_axis(axis)
dasks = [df for df in dfs if isinstance(df, _Frame)]
if all(df.known_divisions for df in dasks):
# must be converted here to check whether divisions can be
# concatenated
dfs = _maybe_from_pandas(dfs)
if axis == 1:
from .multi import concat_indexed_dataframes
return concat_indexed_dataframes(dfs, axis=axis, join=join)
else:
            # each DataFrame's divisions must be greater than the previous one's
if all(dfs[i].divisions[-1] < dfs[i + 1].divisions[0]
for i in range(len(dfs) - 1)):
name = 'concat-{0}'.format(tokenize(*dfs))
dsk, columns = _concat_dfs(dfs, name, join=join)
divisions = []
for df in dfs[:-1]:
# remove last to concatenate with next
divisions += df.divisions[:-1]
divisions += dfs[-1].divisions
return_type = _get_return_type(dfs[0], columns)
return return_type(toolz.merge(dsk, *[df.dask for df in dfs]),
name, columns, divisions)
else:
if interleave_partitions:
from .multi import concat_indexed_dataframes
return concat_indexed_dataframes(dfs, join=join)
                raise ValueError('All inputs have known divisions which cannot '
'be concatenated in order. Specify '
'interleave_partitions=True to ignore order')
else:
if axis == 1:
raise ValueError('Unable to concatenate DataFrame with unknown '
'division specifying axis=1')
else:
# concat will not regard Series as row
dfs = _maybe_from_pandas(dfs)
name = 'concat-{0}'.format(tokenize(*dfs))
dsk, columns = _concat_dfs(dfs, name, join=join)
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return_type = _get_return_type(dfs[0], columns)
return return_type(toolz.merge(dsk, *[df.dask for df in dfs]),
name, columns, divisions)
def _append(df, other, divisions):
""" Internal function to append 2 dd.DataFrame/Series instances """
    # ToDo: it might be possible to merge this logic into concat
token = tokenize(df, other)
name = '{0}-append--{1}'.format(df._token_prefix, token)
dsk = {}
npart = df.npartitions
for i in range(npart):
dsk[(name, i)] = (df._name, i)
for j in range(other.npartitions):
dsk[(name, npart + j)] = (other._name, j)
dsk = toolz.merge(dsk, df.dask, other.dask)
dummy = df._empty_partition.append(other._empty_partition)
return _Frame(dsk, name, dummy, divisions)
| bsd-3-clause |
PiscesDream/Ideas | ML/co-evaluate/stage_3.py | 1 | 3508 | import cPickle
import ann
import theano
import numpy
from createData import plot_all, plot
from numpy.random import choice
import matplotlib.pyplot as plt
import theano.tensor as T
def load_data(num = None):
data, y = cPickle.load(open('data.dat', 'rb'))
less_xx = 0
if less_xx:
less = min([(y==i).sum() for i in set(y)])
count = dict(zip(set(y), [0] * len(set(y))))
new_data, new_y = [], []
for ele, eley in zip(data, y):
if count[eley] >= less: continue
count[eley] += 1
new_data.append(ele)
new_y.append(eley)
data, y = numpy.array(new_data), numpy.array(new_y)
ind = numpy.random.permutation(data.shape[0])
data = data[ind]
y = y[ind]
ind = int(len(y) * 0.7)
train_set, test_set = (data[:ind], y[:ind]), (data[ind:], y[ind:])
plot(*test_set)
raw_input('pause')
def shared_dataset(data_xy, borrow=True, num = None):
data_x, data_y = data_xy
if num:
data_x = data_x[:num]
data_y = data_y[:num]
print data_x.shape, data_y.shape
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
return shared_x, T.cast(shared_y, 'int32')
# valid_set_x, valid_set_y = shared_dataset(valid_set, num = num)
train_set_x, train_set_y = shared_dataset(train_set, num = num)
test_set_x, test_set_y = shared_dataset(test_set, num = num)
rval = [(train_set_x, train_set_y), #(valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
def plot_in_f2(self):
plt.clf()
pred = self.pred()
plot(self.x.get_value(), pred)
plt.draw()
def plot_in_f(self):
plt.clf()
plot_all(self, 10)
plt.draw()
def test():
plt.ion()
plt.show()
datasets = load_data()
cl = ann.ANN(2, 4, hiddens=[4], lmbd = 0.)
cl.fit(datasets, lr = 0.01, batch_size = 100, n_epochs = 1000)
print cl.get_neg_log(data, T.cast(y, 'int32')).mean()
if __name__ == '__main__':
theano.config.exception_verbosity='high'
plt.ion()
plt.show()
# test()
data, y = cPickle.load(open('data.dat', 'rb'))
y = numpy.asarray(y, dtype = 'int32')
total = len(y)
size = int(total * 0.05)
# data, y = theano.shared(data, borrow = True), T.cast(theano.shared(y, borrow = True), 'int32')
    #permanent memory
memory = numpy.zeros((total,))
    #randomly sample training samples
ind = choice(total, size)
max_iteration = 10
iteration = 0
while iteration < max_iteration:
train_set = (theano.shared(data[ind]), theano.shared(y[ind]))
def plot_in_f2(self):
plt.clf()
pred = self.pred(train_set[0])
plot(train_set[0].get_value(), pred)
plt.draw()
cl = ann.ANN(2, 4, hiddens=[4], lmbd = 0.)
haha = 2000
cl.fit((train_set, train_set), lr = 0.01, batch_size = 100, n_epochs = haha,
plot = plot_in_f2, plot_interval = haha)
fitness = cl.get_neg_log(data, y)
#update the memory
memory = fitness #+ memory * 0.71
p = memory/memory.sum()
#resample
        ind = choice(total, size, p = p)
        iteration += 1
| apache-2.0 |
Adai0808/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighboring often share the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
fedotovaleksandr/rapier | app/mdx2csv.py | 1 | 1607 | # coding: utf-8
import sys
import pandas as pd
import olap.xmla.xmla as xmla
from StringIO import StringIO
def xmlamembers2list(itrbl):
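    # Flattens an XMLA member structure into a list of caption strings; nested
    # (list-valued) members are walked with an explicit stack and their
    # captions are concatenated into a single space-separated label.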
result = []
for member in itrbl:
if isinstance(member, list):
label = u''
member_it = iter(member)
s = [None]
while member_it:
try:
low_member = next(member_it)
if isinstance(low_member, list):
s.append(member_it)
member_it = iter(low_member)
else:
label += u'{} '.format(low_member.Caption)
except StopIteration:
member_it = s.pop()
label = label[:-1]
else:
label = member.Caption
result.append(label)
return result
url = sys.argv[1]
user = sys.argv[2]
passw = sys.argv[3]
catalog = sys.argv[4]
mdx_str = sys.argv[5]
p = xmla.XMLAProvider()
c = None
try:
c = p.connect(location=url, username=user, password=passw)
mdx_res = c.Execute(mdx_str, Catalog=catalog)
except: pass
if c:
try: c.EndSession()
except: pass
mdx_cols = xmlamembers2list(mdx_res.getAxisTuple(axis=0))
mdx_rows = xmlamembers2list(mdx_res.getAxisTuple(axis=1))
mdx_data = [[x.FmtValue if hasattr(x, 'FmtValue') else '0' for x in cell] for cell in mdx_res.getSlice()]
mdx_df = pd.DataFrame(mdx_data,
columns=mdx_cols, index=pd.Index(mdx_rows, name='ID'))
mdx_csv_str = StringIO()
mdx_df.to_csv(mdx_csv_str)
print(mdx_csv_str.getvalue())
| mit |
ajlongart/Tesis-UIP | ColorER_SimpleColorBalanceV.py | 1 | 6974 | #!/usr/bin/env python
'''
Toolbox Module 2
Color Restoration with Simplest Color Balance
Underwater Image Pre-processing Thesis
Armando Longart 10-10844
[email protected]
Description: Module implemented to improve the color of underwater
images. It is based on stretching the histogram (histogram stretching)
of the image so that the colors of the output image are enhanced.
The HSV color model is used for the algorithm. The difference between
this module and the HSV SimpleColorBalance is that it only operates on
the V (value) channel, because that channel indicates how bright an
image is. The other two channels, H (Hue) and S (Saturation), produce
substantial changes in the output image, distorting the color and
adding whites respectively, which corrupts the results.
Module implemented in Python
Variation of DavidYKay's Simple Color Balance (in RGB): https://gist.github.com/DavidYKay/9dad6c4ab0d8d7dbf3dc
'''
# Python 2/3 compatibility
import cv2
import math
import numpy as np
import sys
from matplotlib import pyplot as plt
from matplotlib import colors
#-----Functions to Implement-----------------------------------------------------
def apply_mask(matrix, mask, fill_value):
'''
    Function in charge of "creating" the matrix of masked values. These values are
    determined from the high and low values of the apply_threshold function
    (low_mask, high_mask as mask and low_value, high_value as fill_value).
    The execution order is Hue, Saturation, Value
'''
masked = np.ma.array(matrix,mask=mask,fill_value=fill_value)
cv2.imshow("Masked", masked)
cv2.imshow("MaskFill", masked.filled())
cv2.imshow("MaskedFill", masked.filled([0]))
return masked.filled()
def apply_threshold(matrix, low_value, high_value):
'''
    This function creates a boolean matrix whose values (True, False)
    depend on whether the HSV values of the original image are greater or smaller than
    the maximum or minimum threshold values obtained in the sColorBalance function.
    The execution order is Hue, Saturation, Value
'''
low_mask = matrix<low_value
matrix = apply_mask(matrix,low_mask,low_value)
cv2.imshow("MatrixL", matrix)
high_mask = matrix>high_value
matrix = apply_mask(matrix,high_mask,high_value)
cv2.imshow("MatrixH", matrix)
return matrix
def sColorBalance(img_hsv, porcentaje):
'''
    Function in charge of:
        Splitting the HSV channels of the image (split)
        Sorting the pixel values and selecting the "quantiles" of the sorted matrix
        Obtaining the max and min values of the sorted matrix for the V channel (based
        on the saturation percentage)
        Saturating the image at the max and min values of each channel
    All of this so that the colors of the recovered image occupy the widest possible range
    [0,255], applying a transformation only to the V channel
'''
assert img_hsv.shape[2] == 3
assert porcentaje > 0 and porcentaje < 100
mitad_porcentaje = porcentaje/200.0
    hueOri,satOri,valOri = cv2.split(img_hsv) #Split the image into its HSV channels
filas,columnas,canales = img_hsv.shape
cv2.imshow("h", hueOri)
cv2.imshow("s", satOri)
cv2.imshow("v", valOri)
salida_canales = []
canal = valOri
assert len(canal.shape) == 2
print canal
    # find the low and high percentile values (based on the input percentile)
filas,columnas = canal.shape
vec_tam = columnas*filas
flat = canal.reshape(vec_tam)
assert len(flat.shape) == 1
flat = np.sort(flat)
n_cols = flat.shape[0]
    #Selection of the minimum and maximum values of each HSV channel of the image. This is the stretching
    bajo_val = flat[math.floor(n_cols*mitad_porcentaje)] #Low value of the sorted array of the input matrix (img) for each channel
    alto_val = flat[math.ceil(n_cols*(1-mitad_porcentaje))] #High value of the sorted array of the input matrix (img) for each channel. Alternative: alto_val = flat[math.ceil(n_cols*(1-mitad_porcentaje)-1)]
    #The low and high values for each HSV channel. The print order is Hue, Saturation, Value
    print "Lowval: ", bajo_val
    print "Highval: ", alto_val
# saturate below the low percentile and above the high percentile
thresholded = apply_threshold(canal,bajo_val,alto_val)
    # scale the channel
normalized = cv2.normalize(thresholded,thresholded.copy(), 0, 255, cv2.NORM_MINMAX)
print normalized
cv2.imshow("Madfe", normalized)
salida_canales.append(normalized)
img_merge = cv2.merge((hueOri,satOri,normalized))
return img_merge
if __name__ == '__main__':
imgOriginal = cv2.imread('MVI_0234_Cap1.png')
    img_hsv = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV) #Conversion of the original RGB image to HSV
    #-----Function Call----------------------------------------------------
    imgRecuperada = sColorBalance(img_hsv, 1) #Lower and upper threshold percentage relative to the input histogram. This percentage can differ for each histogram limit
    imgRecuperadaRGB = cv2.cvtColor(imgRecuperada,cv2.COLOR_HSV2BGR) #Conversion of the recovered image (V channel) back to RGB
    #-----Results----------------------------------------------------
cv2.imshow("imgOriginal", imgOriginal)
cv2.imshow("imgRecuperadaHSV", imgRecuperada)
cv2.imshow("imgRecuperada", imgRecuperadaRGB)
    #-----Saving the Recovered Image-------------------------------------------
cv2.imwrite('imagenRecuperadaCR_V_RGB.jpg',imgRecuperadaRGB)
    #-----Histogram Computation----------------------------------------------------
'''
    The histogram of the Original image is computed in HSV to determine the range over which
    to apply the algorithm
'''
hue,sat,val = cv2.split(img_hsv)
plt.subplot(311) #plot in the first cell
plt.subplots_adjust(hspace=.5)
plt.title("Hue Original")
plt.ylabel('Numero de Pixeles')
plt.hist(np.ndarray.flatten(hue), bins=128)
plt.xlim([0,180])
plt.subplot(312) #plot in the second cell
plt.title("Saturation Original")
plt.ylabel('Numero de Pixeles')
plt.hist(np.ndarray.flatten(sat), bins=128)
plt.xlim([0,256])
plt.subplot(313) #plot in the third cell
plt.title("Luminosity Value Original")
plt.ylabel('Numero de Pixeles')
plt.hist(np.ndarray.flatten(val), bins=128)
plt.xlim([0,256])
plt.show()
'''
    Here the histograms of the Original and the Recovered images are computed in RGB
'''
color = ('b','g','r')
for i, col in enumerate(color):
histcolorOriginal = cv2.calcHist([imgOriginal],[i],None,[256],[0,256])
histcolorRecuperadaRGB = cv2.calcHist([imgRecuperadaRGB],[i],None,[256],[0,256])
plt.subplot(211), plt.plot(histcolorOriginal,color=col)
plt.title('Histograma Original')
plt.ylabel('Numero de Pixeles')
plt.xlim([0,256])
plt.subplot(212), plt.plot(histcolorRecuperadaRGB,color=col)
plt.title('Histograma Recuperada')
plt.ylabel('Numero de Pixeles')
plt.xlabel('Bins')
plt.xlim([0,256])
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
| gpl-3.0 |
rsignell-usgs/notebook | People/Julia/tower_to_netcdf.py | 1 | 2275 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Save Tower CSV data as NetCDF
# <markdowncell>
# ### Set local variables
# <codecell>
url='http://geoport.whoi.edu/thredds/fileServer/usgs/data2/notebook/data/CR3000_SN3557_Table1_MainTowerCR3000_ground_V6.CR3.txt'
input_data="data.txt"
output_dir="/data"
output_file="julia.nc"
fillvalue=-9999.9
# <markdowncell>
# ### Download the data
# <codecell>
import urllib
urllib.urlretrieve(url, input_data)
# <codecell>
import pandas as pd
df = pd.read_csv(input_data,skiprows=[0,2,3],
parse_dates=True,
index_col='TIMESTAMP',
low_memory=False,
na_values=['NAN',''],
tupleize_cols=True)
df = df.fillna(fillvalue)
df.head()
# <markdowncell>
# ### Simple plot
# <codecell>
import matplotlib.pyplot as plt
%matplotlib inline
df[['Tsoil10cmTree_Avg','Tsoil20cmTree_Avg']].plot(figsize=(12,4));
# <markdowncell>
# ### Create netCDF file
# <codecell>
import numpy as np
def pd_to_secs(df):
# convert a pandas datetime index to seconds since 1970
import calendar
return np.asarray([ calendar.timegm(x.timetuple()) for x in df.index ], dtype=np.int64)
def cf_safe_name(name):
# Create a CF safe name for a group/dimension/variable
import re
if isinstance(name, basestring):
if re.match('^[0-9_]', name):
# Add a letter to the front
name = "v_{}".format(name)
return re.sub(r'[^_a-zA-Z0-9]', "_", name)
return name
# <codecell>
import os
out_file = os.path.join(output_dir, output_file)
if os.path.isfile(out_file):
os.remove(out_file)
from pyaxiom.netcdf.sensors import TimeSeries
ts = TimeSeries(output_dir,
latitude=0.39,
longitude=36.7,
station_name='urn:ioos:station:edu.princeton.ecohydrolab:MainTower',
global_attributes={},
times=pd_to_secs(df),
verticals=[10],
output_filename=output_file)
# <codecell>
for c in df.columns[::-1]:
# Add units based on column name?
var_attributes = dict()
ts.add_variable(cf_safe_name(c), df[c].values, attributes=var_attributes, fillvalue=-9999.9)
# <codecell>
| mit |
dhomeier/astropy | examples/template/example-template.py | 11 | 3356 | # -*- coding: utf-8 -*-
"""
========================
Title of Example
========================
This example <verb> <active tense> <does something>.
The example uses <packages> to <do something> and <other package> to <do other
thing>. Include links to referenced packages like this: `astropy.io.fits` to
show the astropy.io.fits or like this `~astropy.io.fits`to show just 'fits'
*By: <names>*
*License: BSD*
"""
##############################################################################
# Make print work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
# uncomment if including figures:
# import matplotlib.pyplot as plt
# from astropy.visualization import astropy_mpl_style
# plt.style.use(astropy_mpl_style)
##############################################################################
# This code block is executed, although it produces no output. Lines starting
# with a simple hash are code comment and get treated as part of the code
# block. To include this new comment string we started the new block with a
# long line of hashes.
#
# The sphinx-gallery parser will assume everything after this splitter and that
# continues to start with a **comment hash and space** (respecting code style)
# is text that has to be rendered in
# html format. Keep in mind to always keep your comments always together by
# comment hashes. That means to break a paragraph you still need to commend
# that line break.
#
# In this example the next block of code produces some plotable data. Code is
# executed, figure is saved and then code is presented next, followed by the
# inlined figure.
x = np.linspace(-np.pi, np.pi, 300)
xx, yy = np.meshgrid(x, x)
z = np.cos(xx) + np.cos(yy)
plt.figure()
plt.imshow(z)
plt.colorbar()
plt.xlabel('$x$')
plt.ylabel('$y$')
###########################################################################
# Again it is possible to continue the discussion with a new Python string. This
# time to introduce the next code block generates 2 separate figures.
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('hot'))
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('Spectral'), interpolation='none')
##########################################################################
# There's some subtle differences between rendered html rendered comment
# strings and code comment strings which I'll demonstrate below. (Some of this
# only makes sense if you look at the
# :download:`raw Python script <plot_notebook.py>`)
#
# Comments in comment blocks remain nested in the text.
def dummy():
"""Dummy function to make sure docstrings don't get rendered as text"""
pass
# Code comments not preceded by the hash splitter are left in code blocks.
string = """
Triple-quoted string which tries to break parser but doesn't.
"""
############################################################################
# Output of the script is captured:
print('Some output from Python')
############################################################################
# Finally, I'll call ``show`` at the end just so someone running the Python
# code directly will see the plots; this is not necessary for creating the docs
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_LightGBM_Scipy_nightly/source/scipy/stats/_stats_mstats_common.py | 10 | 8685 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero, using Wald Test with t-distribution of
the test statistic.
stderr : float
Standard error of the estimated gradient.
See also
--------
:func:`scipy.optimize.curve_fit` : Use non-linear
least squares to fit a function to data.
:func:`scipy.optimize.leastsq` : Minimize the sum of
squares of a set of equations.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
r-squared: 0.080402268539
Plot the data along with the fitted line
>>> plt.plot(x, y, 'o', label='original data')
>>> plt.plot(x, intercept + slope*x, 'r', label='fitted line')
>>> plt.legend()
>>> plt.show()
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
slope = r_num / ssxm
intercept = ymean - slope*xmean
if n == 2:
# handle case when only two points are passed in
if y[0] == y[1]:
prob = 1.0
else:
prob = 0.0
sterrest = 0.0
else:
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
| mit |
nicholaschris/landsatpy | project_clouds_2.py | 1 | 3431 | import os
import models
import views
import config
import count_clouds
import utils
import cloud_detection
# import cloud_shadow_morphology
import skimage
import numpy as np
from numpy import ma
from matplotlib import pyplot as plt
from math import pi, tan, cos, sin
import imp
imp.reload(cloud_detection)
imp.reload(config)
imp.reload(models)
imp.reload(utils)
data_dir = config.data_dir
path = config.path
row = config.row
time = config.time
band_option = config.band_option
b = band_option
pcp = cloud_detection.calc_pcp()
pcl = cloud_detection.calc_pcl(pcp)
pcp = None
bpcl = bpcl = utils.dilate_boolean_array(pcl)
pcl = None
bpcl = bpcl == 255
labels, nbr_objects = count_clouds.label_clouds(bpcl,1,2)
def get_meta(var):
Scene = models.NetcdfVarModel(data_dir, path, row, time, var)
bt_10 = Scene.data(var)
dimensions= Scene.dimensions
theta_v = Scene.theta_v
theta_0= Scene.theta_0
phi_v = Scene.phi_v
phi_0= Scene.phi_0
return bt_10, dimensions, theta_v, theta_0, phi_v, phi_0
bt_10, dimensions, theta_v, theta_0, phi_v, phi_0 = get_meta('BT_B10')
th0 = theta_0
phi0 = pi - phi_0 # 180deg - azimuth angle
def max_x_y_offset(th0, phi0):
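    # Hedged note: d approximates an assumed cloud height in pixel units
    # (12000 m / 30 m per pixel / 4); the offsets project that height along
    # the solar zenith (th0) and azimuth (phi0) to locate the cast shadow.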
d = 12000/30/4 # cloud_height(label_no)/30
x_offset = - d*tan(th0)*sin(phi0)
y_offset = - d*tan(th0)*cos(phi0)
return x_offset, y_offset
def offset_sign(x_offset, y_offset):
if x_offset <= 0:
x_offset_neg, x_offset_pos = x_offset, 0
if y_offset <= 0:
y_offset_neg, y_offset_pos = y_offset, 0
if x_offset > 0:
x_offset_neg, x_offset_pos = 0, x_offset
if y_offset > 0:
y_offset_neg, y_offset_pos = 0, y_offset
return x_offset_neg, x_offset_pos, y_offset_neg, y_offset_pos
def create_expanded_zone(labels=labels):
x_offset, y_offset = max_x_y_offset(th0, phi0)
amxn, amxp, amyn, amyp = offset_sign(x_offset, y_offset)
amxn, amxp, amyn, amyp = np.int(amxn), np.int(amxp), np.int(amyn), np.int(amyp)
_tmp_shape = -amyn+labels.shape[0]+amyp, -amxn+labels.shape[1]+amxp
shadowy = np.zeros(_tmp_shape)
return shadowy, _tmp_shape
def iter_shadowmaker(labels, nbr_objects):
x_offset, y_offset = max_x_y_offset(th0, phi0)
amxn, amxp, amyn, amyp = offset_sign(x_offset, y_offset)
amxn, amxp, amyn, amyp = np.int(amxn), np.int(amxp), np.int(amyn), np.int(amyp)
print(amxn, amxp, amyn, amyp)
_tmp_shape = -amyn+labels.shape[0]+amyp, -amxn+labels.shape[1]+amxp
shadowy = np.zeros(_tmp_shape)
# test = np.zeros(_tmp_shape)
for label_no in range(1, (nbr_objects+1)): # nbr_objects+1
cloud_object_inds = np.where(labels==label_no)
x_inds = cloud_object_inds[1] - amxn + amxn + amxp
y_inds = cloud_object_inds[0] - amyn + amyn + amyp
shadowy[y_inds, x_inds] = label_no
# test[cloud_object_inds[0], cloud_object_inds[1]] = label_no
# return shadowys
return shadowy[-amyn:_tmp_shape[0]-amyp, -amxn:_tmp_shape[1]-amxp ]
if __name__ == "__main__":
shad = iter_shadowmaker(labels, nbr_objects)
'''
for cloud in list of clouds:
make a bigger area
shift cloud required space
def shift cloud required space(th0, phi0):
distance = 12*tan(th0) # the projection distance
x_offset = - d*tan(th0)*sin(phi0)
y_offset = - d*tan(th0)*cos(phi0) # the x and y coords in unit terms (check pos neg values)
the x and y coords scaled to the projection distance
'''
| mit |
olologin/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be iterable an (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
andrew0harney/Semantic-encoding-model | ldaUtils.py | 1 | 4140 | import glob
import re
import pickle
import numpy as np
import pandas as pd
import logging
from GridRegression import Encoder
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('__ldaUtils__')
"""Utility functions and classes for working with event(epoch) encodings"""
__author__ = 'Andrew O\Harney'
class LdaEncoding:
"""Stores LDA encoding results
Useful for enabling comparisons of encoding probabilities on a given topic"""
name = None
values = None
topicN = None
def __init__(self,name,values,topicN=0):
self.name = name
self.values = values
self.topicN = topicN
def __cmp__(self,y,topicN=None):
if topicN is None:
topicN = self.topicN
return np.sign(self.values[topicN]-y.values[topicN])
def __getitem__(self,topicN):
return self.values[topicN]
def __str__(self,topicN=None):
return self.name if topicN is None else self.name + ' ' + str(self.values[topicN])
def setTopicN(self,topicN):
#Topic number to compare probabilities of
self.topicN = topicN
def createLabeledCorpDict(labeledImageDictionaryName,sourceReg,output=None):
"""Creates labeled dictionary of corpora for referencing
Sample running:
INFO:__ldaUtils__:Processing .../labeled_image_maps/003770.labels.txt
INFO:__ldaUtils__:Processing .../labeled_image_maps/003771.labels.txt
INFO:__ldaUtils__:Processing .../labeled_image_maps/003772.labels.txt
Sample output:
{3770: ['man', 'bull', 'people', 'stadium', 'dirt'],
3771: ['grass', 'sky', 'trees', 'village'],
3772: ['seal', 'rocks']}
Keyword arguments:
labeledImageDictionaryName -- Name for the dictionary
sourceReg -- Regular expression to find labeled image files
Output -- Pickle the dictionary {true,false}"""
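    # A hedged usage sketch (file names and paths are illustrative only):
    #   docs = createLabeledCorpDict('labeledImages.pkl',
    #                                'labeled_image_maps/*.labels.txt', output=True)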
if not glob.glob(labeledImageDictionaryName):
docs = dict()
for tFile in glob.glob(sourceReg):
logger.info('Processing '+str(tFile))
a =open(tFile).read().splitlines()
doc=[]
for line in a:
line = re.findall(r"[\w']+",line)
if len(line)>1:
for item in line:
item = item.lower()
elif line != []:
item = line[0].lower()
doc.append(item)
docs[int(re.findall('[0-9]+', tFile)[0])] = list(set(doc))
#docs[ntpath.basename(tFile)] = list(set(doc))
if output is not None:
pickle.dump(docs, file(labeledImageDictionaryName,'w'))
return docs
else:
return pickle.load(file(labeledImageDictionaryName,'r'))
class LdaEncoder(Encoder):
"Class to encapsulate encoding of an event for a given LDA model"
#
__ldaDict__ = None
__ldaModel__= None
__docs__ = None
__modelWordList__ = None
__numClasses__ = None
#
def __init__(self,ldaDict,docs,lda):
#
self.__ldaDict__ = ldaDict
self.__ldaModel__ = lda
self.__numClasses__ = lda.num_topics
self.__docs__ = docs
self.__modelWordList__ = [self.__ldaModel__.id2word[wordid] for wordid in self.__ldaDict__] #Get valid words for this model
#
def numClasses(self):
return self.__numClasses__
#
def __getitem__(self,event,eps=0):
#Get stim fname
stimName = event['label']
#If it is a stimulus period
if stimName >= 0:
stimWords = self.__docs__[stimName] #Get the labels for the given stimulus
topicProbs= self.model().__getitem__(self.__ldaDict__.doc2bow([word for word in stimWords if word in self.__modelWordList__]),eps=eps) #Get the topic encoding
#Series with {topicNum:prob} structure
return pd.Series([tprob for (_,tprob) in topicProbs],index=[topicNum for (topicNum,_)in topicProbs])
else: #If it is an isi
            return np.zeros([self.model().num_topics])
| mit |
pprett/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |