repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
squirrelo/qiime | scripts/make_distance_comparison_plots.py | 15 | 19365 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
from os.path import join
from string import strip
from skbio.util import create_dir
from skbio.draw import grouped_distributions
from qiime.colors import data_colors, data_color_order
from qiime.group import get_field_state_comparisons
from qiime.colors import matplotlib_rgb_color
from qiime.parse import (group_by_field, parse_distmat, parse_mapping_file,
QiimeParseError)
from qiime.stats import all_pairs_t_test, tail_types
from qiime.util import (get_options_lookup, make_option,
parse_command_line_parameters)
script_info = {}
script_info[
'brief_description'] = "Creates plots comparing distances between sample groupings"
script_info['script_description'] = """
This script creates plots (bar charts, scatter plots, or box plots) that
allow for the comparison between samples grouped at different field states
of a mapping file field.
This script can work with any field in the mapping file, and it can compare
any number of field states to all other field states within that field.
This script may be especially useful for fields that represent a time series,
because a plot can be generated showing the distances between samples at
certain timepoints against all other timepoints.
For example, a time field might contain the values 1, 2, 3, 4, and 5, which
label samples that are from day 1, day 2, day 3, and so on. This time field
can be specified when the script is run, as well as the timepoint(s) to
compare to every other timepoint. For example, two comparison groups
might be timepoints 1 and 2. The resulting plot would contain timepoints for
days 3, 4, and 5 along the x-axis, and at each of those timepoints, the
distances between day 1 and that timepoint would be plotted, as well as the
distances between day 2 and the timepoint.
The script also performs two-sample t-tests for all pairs of distributions to
help determine which distributions are significantly different from each other.
Tip: the script tries its best to fit everything into the plot, but there are
cases where plot elements may get cut off (e.g. if axis labels are extremely
long), or things may appear squashed, cluttered, or too small (e.g. if
there are many boxplots in one plot). Increasing the width and/or height of the
plot (using --width and --height) usually fixes these problems.
For more information and examples pertaining to this script, please refer to
the accompanying tutorial, which can be found at
http://qiime.org/tutorials/creating_distance_comparison_plots.html.
"""
script_info['script_usage'] = []
script_info['script_usage'].append((
"Compare distances between Native and Input samples for each timepoint in the "
"Time field",
"This example will generate a PDF containing a bar chart with the distances "
"between Native samples and every other timepoint, as well as the distances "
"between Input samples and every other timepoint. The output image will be "
"put in the 'out1' directory. For more details about this example input data, "
"please refer to the accompanying tutorial.",
"%prog -d forearm_only_unweighted_unifrac_dm.txt -m "
"costello_timeseries_map.txt -f TIME_SINCE_TRANSPLANT -c \"Native,Input\" -o "
"out1"))
script_info['output_description'] = """
An image of the plot is written to the specified output directory. The raw data
used in the plots and the results of significance tests can optionally be
written into tab-delimited files that are most easily viewed in a spreadsheet
program such as Microsoft Excel.
"""
options = get_options_lookup()
script_info['required_options'] = [
options['mapping_fp'],
options['output_dir'],
make_option('-d', '--distance_matrix_fp',
help='input distance matrix filepath (i.e. the result of '
'beta_diversity.py). WARNING: Only symmetric, hollow distance '
'matrices may be used as input. Asymmetric distance matrices, such as '
'those obtained by the UniFrac Gain metric (i.e. beta_diversity.py '
'-m unifrac_g), should not be used as input',
type='existing_filepath'),
make_option('-f', '--field', type='string',
help='field in the mapping file to make comparisons on'),
make_option('-c', '--comparison_groups', type='string',
help='comma-separated list of field states to compare to every other '
'field state, where the list of field states should be in quotes '
'(e.g. "FieldState1,FieldState2,FieldState3")')]
script_info['optional_options'] = [
make_option('-t', '--plot_type',
help='type of plot to produce ("bar" is bar chart, "scatter" is '
'scatter plot, and "box" is box plot) [default: %default]',
default='bar', type='choice', choices=['bar', 'scatter', 'box']),
make_option('-g', '--imagetype',
help='type of image to produce (i.e. png, svg, pdf) '
'[default: %default]', default='pdf', type="choice",
choices=['pdf', 'png', 'svg']),
make_option('--save_raw_data', action='store_true',
help='store raw data used to create plot in a tab-delimited file '
'[default: %default]',
default=False),
make_option('--suppress_significance_tests', action='store_true',
help='suppress performing significance tests between each pair of '
'distributions [default: %default]', default=False),
make_option('-n', '--num_permutations', type='int',
help='the number of Monte Carlo permutations to perform when '
'calculating the nonparametric p-value in the significance tests. '
'Must be an integer greater than or equal to zero. If zero, the '
'nonparametric p-value will not be calculated and will instead be '
'reported as "N/A". This option has no effect if '
'--suppress_significance_tests is supplied [default: %default]',
default=0),
make_option('--tail_type', type='choice',
choices=tail_types, help='the type of tail test to compute when '
'calculating the p-values in the significance tests. "high" specifies '
'a one-tailed test for values greater than the observed t statistic, '
'while "low" specifies a one-tailed test for values less than the '
'observed t statistic. "two-sided" specifies a two-tailed test for '
'values greater in magnitude than the observed t statistic. This '
'option has no effect if --suppress_significance_tests is supplied. '
'Valid choices: ' +
' or '.join(tail_types) + ' [default: %default]',
default='two-sided'),
make_option('--width',
help='width of the output image in inches [default: %default]',
default=12, type='float'),
make_option('--height',
help='height of the output image in inches [default: %default]',
default=6, type='float'),
make_option('--x_tick_labels_orientation',
help='type of orientation for x-axis tick labels [default: %default]',
default='vertical', type='choice', choices=['vertical', 'horizontal']),
make_option('-a', '--label_type',
help='Label type ("numeric" or "categorical"). '
'If the label type is defined as numeric, the x-axis will be '
'scaled accordingly. Otherwise the x-values will be treated '
'categorically and will be evenly spaced [default: %default].',
default='categorical',
type='choice', choices=['categorical', 'numeric']),
make_option('--y_min',
help='the minimum y-axis value in the resulting plot. If "auto", '
'it is automatically calculated [default: %default]',
default=0, type='string'),
make_option('--y_max',
help='the maximum y-axis value in the resulting plot. If "auto", '
'it is automatically calculated [default: %default]',
default=1, type='string'),
make_option('--transparent', action='store_true',
help='make output images transparent (useful for overlaying an image '
'on top of a colored background) [default: %default]',
default=False),
make_option('--whisker_length',
help='if --plot_type is "box", determines the length of the whiskers '
'as a function of the IQR. For example, if 1.5, the whiskers '
'extend to 1.5 * IQR. Anything outside of that range is seen as '
'an outlier. If --plot_type is not "box", this option is ignored '
'[default: %default]',
default=1.5, type='float'),
make_option('--error_bar_type',
help='if --plot_type is "bar", determines the type of error bars to '
'use. "stdv" is standard deviation and "sem" is the standard '
'error of the mean. If --plot_type is not "bar", this option is '
'ignored [default: %default]',
default='stdv', type='choice', choices=['stdv', 'sem']),
make_option('--distribution_width',
help='width (in plot units) of each individual distribution (e.g. each '
'bar if the plot type is a bar chart, or the width of each box '
'if the plot type is a boxplot) [default: auto]',
default=None, type='float')]
script_info[
'option_label'] = {'mapping_fp': 'QIIME-formatted mapping filepath',
'output_dir': 'output directory',
'distance_matrix_fp':
'distance matrix filepath',
'field': 'field in mapping file',
'comparison_groups': 'field states to compare',
'imagetype': 'output image format',
'save_raw_data': 'save raw data used in plot',
'plot_type': 'output plot type',
'width': 'image width',
'height': 'image height',
'x_tick_labels_orientation':
'x-axis tick label '
'orientation',
'label_type': 'x-axis label type',
'y_min': 'y-axis min',
'y_max': 'y-axis max',
'transparent': 'make images transparent',
'whisker_length': 'whisker length as function '
'of IQR',
'error_bar_type': 'type of error bars to use ',
'distribution_width': 'width of each '
'distribution'}
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
# Create the output dir if it doesn't already exist.
try:
create_dir(opts.output_dir)
except:
option_parser.error("Could not create or access output directory "
"specified with the -o option.")
# Parse the distance matrix and mapping file.
try:
dist_matrix_header, dist_matrix = parse_distmat(
open(opts.distance_matrix_fp, 'U'))
except:
option_parser.error("This does not look like a valid distance matrix "
"file. Please supply a valid distance matrix file using the -d "
"option.")
try:
mapping, mapping_header, mapping_comments = parse_mapping_file(
open(opts.mapping_fp, 'U'))
except QiimeParseError:
option_parser.error("This does not look like a valid metadata mapping "
"file. Please supply a valid mapping file using the -m option.")
# Make sure the y_min and y_max options make sense, as they can be either
# 'auto' or a number.
y_min = opts.y_min
y_max = opts.y_max
try:
y_min = float(y_min)
except ValueError:
if y_min == 'auto':
y_min = None
else:
option_parser.error("The --y_min option must be either a number "
"or 'auto'.")
try:
y_max = float(y_max)
except ValueError:
if y_max == 'auto':
y_max = None
else:
option_parser.error("The --y_max option must be either a number "
"or 'auto'.")
# Parse the field states that will be compared to every other field state.
comparison_field_states = opts.comparison_groups
comparison_field_states = map(strip, comparison_field_states.split(','))
comparison_field_states = [field_state.strip('"').strip("'")
for field_state in comparison_field_states]
if comparison_field_states is None:
option_parser.error("You must provide at least one field state to "
"compare (using the -c option).")
# Get distance comparisons between each field state and each of the
# comparison field states.
field = opts.field
comparison_groupings = get_field_state_comparisons(dist_matrix_header,
dist_matrix, mapping_header, mapping, field,
comparison_field_states)
# Grab a list of all field states that had the comparison field states
# compared against them. These will be plotted along the x-axis.
field_states = comparison_groupings.keys()
def custom_comparator(x, y):
try:
num_x = float(x)
num_y = float(y)
return int(num_x - num_y)
except:
if x < y:
return -1
elif x > y:
return 1
else:
return 0
# Sort the field states as numbers if the elements are numbers, else sort
# them lexically.
field_states.sort(custom_comparator)
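# (Note: passing a cmp-style comparator to list.sort() like this is Python 2
# only; on Python 3 it would need key=functools.cmp_to_key(custom_comparator).)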
# If the label type is numeric, get a list of all field states in sorted
# numeric order. These will be used to determine the spacing of the
# field state 'points' along the x-axis.
x_spacing = None
if opts.label_type == "numeric":
try:
x_spacing = sorted(map(float, field_states))
except:
option_parser.error("The 'numeric' label type is invalid because "
"not all field states could be converted into "
"numbers. Please specify a different label "
"type.")
# Accumulate the data for each field state 'point' along the x-axis.
plot_data = []
plot_x_axis_labels = []
for field_state in field_states:
field_state_data = []
for comp_field_state in comparison_field_states:
field_state_data.append(
comparison_groupings[field_state][comp_field_state])
plot_data.append(field_state_data)
plot_x_axis_labels.append(field_state)
# Plot the data and labels.
plot_title = "Distance Comparisons"
plot_x_label = field
plot_y_label = "Distance"
# If we are creating a bar chart or box plot, grab a list of good data
# colors to use.
plot_type = opts.plot_type
plot_colors = None
if plot_type == "bar" or plot_type == "box":
plot_colors = [matplotlib_rgb_color(data_colors[color].toRGB())
for color in data_color_order]
assert plot_data, "Error: there is no data to plot!"
width = opts.width
height = opts.height
if width <= 0 or height <= 0:
option_parser.error("The specified width and height of the image must "
"be greater than zero.")
plot_figure = grouped_distributions(
opts.plot_type, plot_data, x_values=x_spacing,
data_point_labels=plot_x_axis_labels,
distribution_labels=comparison_field_states,
distribution_markers=plot_colors, x_label=plot_x_label,
y_label=plot_y_label, title=plot_title,
x_tick_labels_orientation=opts.x_tick_labels_orientation, y_min=y_min,
y_max=y_max, whisker_length=opts.whisker_length,
error_bar_type=opts.error_bar_type,
distribution_width=opts.distribution_width, figure_width=width,
figure_height=height)
# Save the plot in the specified format.
output_plot_fp = join(opts.output_dir, "%s_Distance_Comparisons.%s" %
(field, opts.imagetype))
plot_figure.savefig(output_plot_fp, format=opts.imagetype,
transparent=opts.transparent)
if not opts.suppress_significance_tests:
sig_tests_f = open(join(opts.output_dir, "%s_Stats.txt" % field), 'w')
# Rearrange the plot data into a format suitable for all_pairs_t_test.
sig_tests_labels = []
sig_tests_data = []
for data_point, data_point_label in zip(plot_data, plot_x_axis_labels):
for dist, comp_field in zip(data_point, comparison_field_states):
sig_tests_labels.append('%s vs %s' % (data_point_label,
comp_field))
sig_tests_data.append(dist)
sig_tests_results = all_pairs_t_test(sig_tests_labels, sig_tests_data,
tail_type=opts.tail_type,
num_permutations=opts.num_permutations)
sig_tests_f.write(sig_tests_results)
sig_tests_f.close()
if opts.save_raw_data:
# Write the raw plot data into a tab-delimited file, where each line
# has the distances between a comparison group and another field state
# 'point' along the x-axis.
assert (len(plot_x_axis_labels) == len(plot_data)), "The number of " +\
"labels does not match the number of points along the x-axis."
raw_data_fp = join(opts.output_dir,
"%s_Distance_Comparisons.txt" % field)
raw_data_f = open(raw_data_fp, 'w')
raw_data_f.write("#ComparisonGroup\tFieldState\tDistances\n")
for label, data in zip(plot_x_axis_labels, plot_data):
assert (len(comparison_field_states) == len(data)), "The " +\
"number of specified comparison groups does not match " +\
"the number of groups found at the current point along " +\
"the x-axis."
for comp_field_state, comp_grp_data in zip(comparison_field_states, data):
raw_data_f.write(comp_field_state + "\t" + label + "\t" +
"\t".join(map(str, comp_grp_data)) + "\n")
raw_data_f.close()
if __name__ == "__main__":
main()
| gpl-2.0 |
cuilishen/cuilishenMissionPlanner | Lib/site-packages/scipy/signal/waveforms.py | 55 | 11609 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
def sawtooth(t, width=1):
"""
Return a periodic sawtooth waveform.
The sawtooth waveform has a period 2*pi, rises from -1 to 1 on the
interval 0 to width*2*pi and drops from 1 to -1 on the interval
width*2*pi to 2*pi. `width` must be in the interval [0,1].
Parameters
----------
t : array_like
Time.
width : float, optional
Width of the waveform. Default is 1.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> x = np.linspace(0, 20*np.pi, 500)
>>> plt.plot(x, signal.sawtooth(x))
"""
t,w = asarray(t), asarray(width)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,tsub / (pi*wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3, (pi*(wsub+1)-tsub)/(pi*(1-wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period 2*pi, has value +1 from 0 to 2*pi*duty
and -1 from 2*pi*duty to 2*pi. `duty` must be in the interval [0,1].
Parameters
----------
t : array_like
The input time array.
duty : float, optional
Duty cycle.
Returns
-------
y : array_like
The output square wave.
"""
t,w = asarray(t), asarray(duty)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to duty*2*pi function is
# 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,1)
# on the interval duty*2*pi to 2*pi function is
# -1
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3,-1)
return y
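# Illustrative usage sketch (not part of the original SciPy source; variable
# names are arbitrary): with duty=0.3 the wave is +1 for the first 30% of each
# period and -1 for the remainder:
# >>> import numpy as np
# >>> t = np.linspace(0, 2 * np.pi, 8, endpoint=False)
# >>> square(t, duty=0.3)   # -> [ 1.,  1.,  1., -1., -1., -1., -1., -1.]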
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False):
"""
Return a gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t).
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray, or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi*fc*bw)**2 / (4.0*log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref)/a)
yenv = exp(-a*t*t)
yI = yenv * cos(2*pi*fc*t)
yQ = yenv * sin(2*pi*fc*t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
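# Illustrative usage sketch (not part of the original SciPy source; variable
# names are arbitrary): a 5 kHz pulse sampled over 1 ms, plus the 'cutoff'
# time query:
# >>> import numpy as np
# >>> t = np.linspace(-0.5e-3, 0.5e-3, 201)
# >>> yi, yq, env = gausspulse(t, fc=5000, retquad=True, retenv=True)
# >>> t_cutoff = gausspulse('cutoff', fc=5000, bw=0.5, tpr=-40)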
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per time unit';
there is no assumption here that the time unit is one second. The
important distinction is that the units of rotation are cycles, not
radians.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
f0 : float
Frequency (in Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (in Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2*pi*f(t)``.
``f(t)`` is defined below.
See Also
--------
scipy.signal.waveforms.sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f1 must be positive, and f0 must be greater than f1.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
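# Illustrative usage sketch (not part of the original SciPy source; variable
# names are arbitrary): a linear sweep from 6 Hz down to 1 Hz over 10 seconds:
# >>> import numpy as np
# >>> t = np.linspace(0, 10, 5001)
# >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')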
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp to generate its output.
See `chirp` for a description of the arguments.
"""
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2*pi * (f0*t + 0.5*beta*t*t)
elif method in ['quadratic','quad','q']:
beta = (f1 - f0)/(t1**2)
if vertex_zero:
phase = 2*pi * (f0*t + beta * t**3/3)
else:
phase = 2*pi * (f1*t + beta * ((t1 - t)**3 - t1**3)/3)
elif method in ['logarithmic', 'log', 'lo']:
if f0*f1 <= 0.0:
raise ValueError("For a geometric chirp, f0 and f1 must be nonzero " \
"and have the same sign.")
if f0 == f1:
phase = 2*pi * f0 * t
else:
beta = t1 / log(f1/f0)
phase = 2*pi * beta * f0 * (pow(f1/f0, t/t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f1 <= 0.0 or f0 <= f1:
raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.")
c = f1*t1
df = f0 - f1
phase = 2*pi * (f0 * c / df) * log((df*t + c)/c)
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', "
"or 'hyperbolic', but a value of %r was given." % method)
return phase
def sweep_poly(t, poly, phi=0):
"""Frequency-swept cosine generator, with a time-dependent frequency
specified as a polynomial.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1D ndarray (or array-like), or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees. Default is 0.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``;
``f(t)`` is defined above.
See Also
--------
scipy.signal.waveforms.chirp
Notes
-----
.. versionadded:: 0.8.0
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
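# Illustrative usage sketch (not part of the original SciPy source; variable
# names are arbitrary): instantaneous frequency
# f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2 over 0 <= t <= 10:
# >>> import numpy as np
# >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
# >>> t = np.linspace(0, 10, 5001)
# >>> w = sweep_poly(t, p)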
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2*pi * polyval(intpoly, t)
return phase
| gpl-3.0 |
ddboline/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 39 | 5044 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
"""
LocalDot is a linear operation computationally similar to
convolution in the spatial domain, except that whereas convolution
applies a single filter or set of filters across an image,
LocalDot has different filterbanks for different points in the image.
Mathematically, this is a general linear transform except for a
restriction that filters are 0 outside of a spatially localized patch
within the image.
Image shape is 5-tuple:
color_groups
colors_per_group
rows
cols
images
Filterbank shape is 7-tuple (!)
0 row_positions
1 col_positions
2 colors_per_group
3 height
4 width
5 color_groups
6 filters_per_group
The result of left-multiplication is a 5-tuple with shape:
filter_groups
filters_per_group
row_positions
col_positions
images
Parameters
----------
filters : WRITEME
irows : WRITEME
Image rows
icols : WRITEME
Image columns
subsample : WRITEME
padding_start : WRITEME
filters_shape : WRITEME
message : WRITEME
"""
def __init__(self, filters, irows, icols=None,
subsample=(1, 1),
padding_start=None,
filters_shape=None,
message=""):
LinearTransform.__init__(self, [filters])
self._filters = filters
if filters_shape is None:
self._filters_shape = tuple(filters.get_value(borrow=True).shape)
else:
self._filters_shape = tuple(filters_shape)
self._irows = irows
if icols is None:
self._icols = irows
else:
self._icols = icols
if self._icols != self._irows:
raise NotImplementedError('GPU code at least needs square imgs')
self._subsample = tuple(subsample)
self._padding_start = padding_start
if len(self._filters_shape) != 7:
raise TypeError('need 7-tuple filter shape', self._filters_shape)
if self._subsample[0] != self._subsample[1]:
raise ValueError('subsampling must be same in rows and cols')
self._filter_acts = FilterActs(self._subsample[0])
self._img_acts = ImgActs(module_stride=self._subsample[0])
if message:
self._message = message
else:
self._message = filters.name
def rmul(self, x):
"""
.. todo::
WRITEME
"""
assert x.ndim == 5
return self._filter_acts(x, self._filters)
def rmul_T(self, x):
"""
.. todo::
WRITEME
"""
return self._img_acts(self._filters, x, self._irows, self._icols)
def col_shape(self):
"""
.. todo::
WRITEME
"""
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
def row_shape(self):
"""
.. todo::
WRITEME
"""
fshape = self._filters_shape
fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
fgroups, filters_per_group = fshape[-2:]
return fgroups, fcolors, self._irows, self._icols
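# Note (an illustration, not in the original source): following the shape
# conventions in the class docstring, row_shape() above is the image-side
# shape without the trailing batch dimension, i.e.
# (color_groups, colors_per_group, irows, icols), with fgroups standing in
# for color_groups, while col_shape() is the output-side shape
# (fgroups, filters_per_group, fmodulesR, fmodulesC), also without the
# batch dimension.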
def print_status(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("TODO: fix dependence on non-existent "
"ndarray_status function")
"""print ndarray_status(
self._filters.get_value(borrow=True),
msg='%s{%s}'% (self.__class__.__name__,
self._message))
"""
def imshow_gray(self):
"""
.. todo::
WRITEME
"""
filters = self._filters.get_value()
modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
logger.info(filters.shape)
rval = np.zeros((
modR * (rows + 1) - 1,
modC * (cols + 1) - 1,
))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
| bsd-3-clause |
Sentient07/scikit-learn | sklearn/metrics/regression.py | 47 | 19967 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Karan Desai <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different numbers of outputs "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average',
'variance_weighted')
if isinstance(multioutput, string_types):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. "
"You provided multioutput={!r}".format(
allowed_multioutput_str,
multioutput))
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
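# Illustrative behaviour of the private validator above (a sketch, not part of
# the original scikit-learn source):
# >>> _check_reg_targets([3, -0.5, 2], [2.5, 0.0, 2], 'uniform_average')[0]
# 'continuous'
# >>> _check_reg_targets([[0.5, 1], [-1, 1]], [[0, 2], [-1, 2]], 'raw_values')[0]
# 'continuous-multioutput'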
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_log_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared logarithmic error regression loss
Read more in the :ref:`User Guide <mean_squared_log_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average'] \
or array-like of shape = (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors when the input is of multioutput
format.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_log_error
>>> y_true = [3, 5, 2.5, 7]
>>> y_pred = [2.5, 5, 4, 8]
>>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS
0.039...
>>> y_true = [[0.5, 1], [1, 2], [7, 6]]
>>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
>>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS
0.044...
>>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.004..., 0.083...])
>>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.060...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if (y_true < 0).any() or (y_pred < 0).any():
raise ValueError("Mean Squared Logarithmic Error cannot be used when "
"targets contain negative values.")
return mean_squared_error(np.log(y_true + 1), np.log(y_pred + 1),
sample_weight, multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred, sample_weight=None,
multioutput="uniform_average"):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get an R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default is "uniform_average".
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
.. versionchanged:: 0.19
Default value of multioutput is 'uniform_average'.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted')
... # doctest: +ELLIPSIS
0.938...
>>> y_true = [1,2,3]
>>> y_pred = [1,2,3]
>>> r2_score(y_true, y_pred)
1.0
>>> y_true = [1,2,3]
>>> y_pred = [2,2,2]
>>> r2_score(y_true, y_pred)
0.0
>>> y_true = [1,2,3]
>>> y_pred = [3,2,1]
>>> r2_score(y_true, y_pred)
-3.0
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
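# Per-output R^2 = 1 - SS_res / SS_tot: `numerator` below is the (optionally
# sample-weighted) residual sum of squares and `denominator` the weighted
# total sum of squares around the weighted mean of y_true.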
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrarily set to zero to avoid -inf scores; having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid failing on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
bbfamily/abu | abupy/WidgetBu/ABuWGBRun.py | 1 | 8791 | # -*- encoding:utf-8 -*-
"""上层回测图形可视化"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import pandas as pd
from IPython.display import display
import ipywidgets as widgets
from ..UtilBu import ABuProgress
from ..WidgetBu.ABuWGBase import WidgetBase, show_msg_func, show_msg_toast_func
from ..WidgetBu.ABuWGBRunBase import WidgetRunTT
from ..WidgetBu.ABuWGBSymbol import WidgetSymbolChoice
from ..WidgetBu.ABuWGBFBase import BuyFactorWGManager
from ..WidgetBu.ABuWGSFBase import SellFactorWGManager
from ..WidgetBu.ABuWGPSBase import PickStockWGManager
from ..WidgetBu.ABuWGPosBase import PosWGManager
from ..WidgetBu.ABuWGUmp import WidgetUmp
from ..CoreBu.ABu import run_loop_back
from ..CoreBu.ABuStore import store_abu_result_out_put
# noinspection PyUnresolvedReferences
from ..CoreBu.ABuFixes import filter
from ..MarketBu.ABuDataCheck import check_symbol_data_mode
from ..BetaBu import ABuAtrPosition, ABuPositionBase
from ..AlphaBu import ABuPickTimeExecute
from ..TradeBu.ABuBenchmark import AbuBenchmark
from ..TradeBu.ABuCapital import AbuCapital
from ..MetricsBu.ABuMetricsBase import AbuMetricsBase
from ..CoreBu.ABuStore import AbuResultTuple
__author__ = '阿布'
__weixin__ = 'abu_quant'
# noinspection PyProtectedMember
class WidgetRunLoopBack(WidgetBase):
"""Basic settings visualization: initial capital, backtest start/end period, benchmark, etc."""
# noinspection PyProtectedMember
def __init__(self):
"""Assemble the widget components needed for a backtest into tabs."""
self.tt = WidgetRunTT()
self.sc = WidgetSymbolChoice()
self.bf = BuyFactorWGManager()
self.sf = SellFactorWGManager()
# The sell-factor manager registers the buy-factor manager to receive change notifications
self.sf.register(self.bf)
self.ps = PickStockWGManager()
# The stock-picking manager registers the buy-factor manager to receive change notifications
self.ps.register(self.bf)
self.pos = PosWGManager()
# The position (capital) manager registers the buy-factor manager to receive change notifications
self.pos.register(self.bf)
# Build the umpire (ump) interface
self.ump = WidgetUmp()
sub_widget_tab = widgets.Tab()
sub_widget_tab.children = [self.tt.widget, self.sc.widget, self.bf.widget, self.sf.widget, self.ps.widget,
self.pos.widget, self.ump.widget]
for ind, name in enumerate([u'基本', u'股池', u'买策', u'卖策', u'选股', u'资管', u'裁判']):
sub_widget_tab.set_title(ind, name)
self.run_loop_bt = widgets.Button(description=u'开始回测', layout=widgets.Layout(width='98%'),
button_style='danger')
self.run_loop_bt.on_click(self.run_loop_back)
self.widget = widgets.VBox([sub_widget_tab, self.run_loop_bt])
def _metrics_out_put(self, metrics, abu_result_tuple):
"""Produce output according to the backtest results and the settings chosen in the UI."""
if metrics is None:
return
if self.tt.metrics_mode.value == 0:
metrics.plot_returns_cmp(only_show_returns=True)
else:
metrics.plot_order_returns_cmp(only_info=True)
pd.options.display.max_rows = self.tt.out_put_display_max_rows.value
pd.options.display.max_columns = self.tt.out_put_display_max_columns.value
"""
options={u'output only the trade orders: orders_pd': 0,
u'output only the trade actions: action_pd': 1,
u'output only the capital sheet: capital_pd': 2,
u'output orders, actions and capital together': 3
"""
if self.tt.metrics_out_put.value == 0 or self.tt.metrics_out_put.value == 3:
show_msg_func(u'交易买卖详情单:')
display(abu_result_tuple.orders_pd)
if self.tt.metrics_out_put.value == 1 or self.tt.metrics_out_put.value == 3:
show_msg_func(u'交易行为详情单:')
display(abu_result_tuple.action_pd)
if self.tt.metrics_out_put.value == 2 or self.tt.metrics_out_put.value == 3:
show_msg_func(u'交易资金详细单:')
display(abu_result_tuple.capital.capital_pd)
show_msg_func(u'交易手续费详单:')
display(abu_result_tuple.capital.commission.commission_df)
if self.tt.save_out_put.value is True:
# Save each result sheet to a local file
store_abu_result_out_put(abu_result_tuple)
# noinspection PyUnusedLocal
def run_loop_back(self, bt):
"""Button handler that runs the backtest."""
# Clear previous output
# ABuProgress.clear_output()
base_run = self.tt
# Initial capital
cash = base_run.cash.value
n_folds = 2
start = None
end = None
if not base_run.run_years.disabled:
# 如果使用年回测模式
n_folds = base_run.run_years.value
if not base_run.start.disabled:
# 使用开始回测日期
start = base_run.start.value
if not base_run.end.disabled:
# 使用结束回测日期
end = base_run.end.value
choice_symbols = self.sc.choice_symbols.options
if choice_symbols is not None and len(choice_symbols) == 0:
# If not a single symbol is set, use None: the whole selected market will be backtested
choice_symbols = None
if not check_symbol_data_mode(choice_symbols):
return
# The buy strategies form a sequence
buy_factors = list(self.bf.factor_dict.values())
if len(buy_factors) == 0:
msg = u'没有添加任何一个买入策略!'
show_msg_toast_func(msg)
return
# Sell strategies are optional; there may be none at all
sell_factors = list(self.sf.factor_dict.values())
pos_class_list = list(self.pos.factor_dict.values())
if len(pos_class_list) == 1:
# Global position-sizing strategy setting; [0] because only one global position strategy is allowed
ABuPositionBase.g_default_pos_class = pos_class_list[0]
# The umpire performs its pre-backtest setup according to the working mode
self.ump.run_before()
if choice_symbols is not None and len(choice_symbols) == 1:
# If only one stock is backtested, use that stock directly as the comparison benchmark
benchmark = AbuBenchmark(choice_symbols[0])
capital = AbuCapital(cash, benchmark)
if len(pos_class_list) == 0:
# With a single stock and untouched position-management settings, raise the position ratio
ABuAtrPosition.g_atr_pos_base = 0.5
# With only one stock, the stock-picking strategies are not run either
orders_pd, action_pd, _ = ABuPickTimeExecute.do_symbols_with_same_factors(choice_symbols,
benchmark,
buy_factors,
sell_factors,
capital, show=True)
abu_result_tuple = AbuResultTuple(orders_pd, action_pd, capital, benchmark)
metrics = AbuMetricsBase(orders_pd, action_pd, capital, benchmark)
else:
# Update choice_symbols for stock-picking strategies that need them
self.ps.seed_choice_symbol_update(choice_symbols)
# With multiple stocks, apply the stock-picking strategies
stock_picks = list(self.ps.factor_dict.values())
if len(stock_picks) == 0:
stock_picks = None
# Multiple stocks: use run_loop_back
abu_result_tuple, _ = run_loop_back(cash,
buy_factors,
sell_factors,
stock_picks,
choice_symbols=choice_symbols,
start=start,
end=end,
n_folds=n_folds)
if abu_result_tuple is None:
return
ABuProgress.clear_output()
metrics = AbuMetricsBase(*abu_result_tuple)
metrics.fit_metrics()
self._metrics_out_put(metrics, abu_result_tuple)
# Umpire (ump) wrap-up work
self.ump.run_end(abu_result_tuple, choice_symbols, list(self.bf.factor_dict.keys()),
list(self.sf.factor_dict.keys()), list(self.ps.factor_dict.keys()))
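# Illustrative notebook usage (a sketch, not part of the original abupy source);
# assumes a Jupyter environment with ipywidgets enabled:
# >>> from abupy.WidgetBu.ABuWGBRun import WidgetRunLoopBack
# >>> wg = WidgetRunLoopBack()
# >>> wg.widget   # display the assembled tab UI; the button starts the backtest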
| gpl-3.0 |
jseabold/statsmodels | statsmodels/emplike/tests/test_regression.py | 5 | 5787 | from numpy.testing import assert_almost_equal
import pytest
from statsmodels.regression.linear_model import OLS
from statsmodels.tools import add_constant
from .results.el_results import RegressionResults
from statsmodels.datasets import stackloss
class GenRes(object):
"""
Loads data and creates class instance ot be tested
"""
@classmethod
def setup_class(cls):
data = stackloss.load(as_pandas=False)
data.exog = add_constant(data.exog)
cls.res1 = OLS(data.endog, data.exog).fit()
cls.res2 = RegressionResults()
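# Sketch of the empirical-likelihood API exercised below (not part of the
# original statsmodels test module; the unpacking mirrors the indexing used
# in the assertions):
# >>> res = OLS(data.endog, data.exog).fit()
# >>> stat, pval, weights = res.el_test([-30], [0], return_weights=1)
# >>> lower, upper = res.conf_int_el(1, method='nm')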
@pytest.mark.slow
class TestRegressionPowell(GenRes):
"""
All confidence intervals are tested by conducting hypothesis
tests at the confidence interval values.
See Also
--------
test_descriptive.py, test_ci_skew
"""
@pytest.mark.slow
def test_hypothesis_beta0(self):
beta0res = self.res1.el_test([-30], [0], return_weights=1,
method='powell')
assert_almost_equal(beta0res[:2], self.res2.test_beta0[:2], 4)
assert_almost_equal(beta0res[2], self.res2.test_beta0[2], 4)
@pytest.mark.slow
def test_hypothesis_beta1(self):
beta1res = self.res1.el_test([.5], [1], return_weights=1,
method='powell')
assert_almost_equal(beta1res[:2], self.res2.test_beta1[:2], 4)
assert_almost_equal(beta1res[2], self.res2.test_beta1[2], 4)
def test_hypothesis_beta2(self):
beta2res = self.res1.el_test([1], [2], return_weights=1,
method='powell')
assert_almost_equal(beta2res[:2], self.res2.test_beta2[:2], 4)
assert_almost_equal(beta2res[2], self.res2.test_beta2[2], 4)
def test_hypothesis_beta3(self):
beta3res = self.res1.el_test([0], [3], return_weights=1,
method='powell')
assert_almost_equal(beta3res[:2], self.res2.test_beta3[:2], 4)
assert_almost_equal(beta3res[2], self.res2.test_beta3[2], 4)
# Confidence interval results obtained through hypothesis testing in Matlab
@pytest.mark.slow
def test_ci_beta0(self):
beta0ci = self.res1.conf_int_el(0, lower_bound=-52.9,
upper_bound=-24.1, method='powell')
assert_almost_equal(beta0ci, self.res2.test_ci_beta0, 3)
# Slightly lower precision. CI was obtained from nm method.
@pytest.mark.slow
def test_ci_beta1(self):
beta1ci = self.res1.conf_int_el(1, lower_bound=.418, upper_bound=.986,
method='powell')
assert_almost_equal(beta1ci, self.res2.test_ci_beta1, 4)
@pytest.mark.slow
def test_ci_beta2(self):
beta2ci = self.res1.conf_int_el(2, lower_bound=.59,
upper_bound=2.2, method='powell')
assert_almost_equal(beta2ci, self.res2.test_ci_beta2, 5)
@pytest.mark.slow
def test_ci_beta3(self):
beta3ci = self.res1.conf_int_el(3, lower_bound=-.39, upper_bound=.01,
method='powell')
assert_almost_equal(beta3ci, self.res2.test_ci_beta3, 6)
class TestRegressionNM(GenRes):
"""
All confidence intervals are tested by conducting hypothesis
tests at the confidence interval values.
See Also
--------
test_descriptive.py, test_ci_skew
"""
def test_hypothesis_beta0(self):
beta0res = self.res1.el_test([-30], [0], return_weights=1,
method='nm')
assert_almost_equal(beta0res[:2], self.res2.test_beta0[:2], 4)
assert_almost_equal(beta0res[2], self.res2.test_beta0[2], 4)
def test_hypothesis_beta1(self):
beta1res = self.res1.el_test([.5], [1], return_weights=1,
method='nm')
assert_almost_equal(beta1res[:2], self.res2.test_beta1[:2], 4)
assert_almost_equal(beta1res[2], self.res2.test_beta1[2], 4)
@pytest.mark.slow
def test_hypothesis_beta2(self):
beta2res = self.res1.el_test([1], [2], return_weights=1,
method='nm')
assert_almost_equal(beta2res[:2], self.res2.test_beta2[:2], 4)
assert_almost_equal(beta2res[2], self.res2.test_beta2[2], 4)
@pytest.mark.slow
def test_hypothesis_beta3(self):
beta3res = self.res1.el_test([0], [3], return_weights=1,
method='nm')
assert_almost_equal(beta3res[:2], self.res2.test_beta3[:2], 4)
assert_almost_equal(beta3res[2], self.res2.test_beta3[2], 4)
# Confidence interval results obtained through hyp testing in Matlab
@pytest.mark.slow
def test_ci_beta0(self):
# All confidence intervals are tested by conducting hypothesis
# tests at the confidence interval values since el_test
# is already tested against Matlab
#
# See Also
# --------
#
# test_descriptive.py, test_ci_skew
beta0ci = self.res1.conf_int_el(0, method='nm')
assert_almost_equal(beta0ci, self.res2.test_ci_beta0, 6)
@pytest.mark.slow
def test_ci_beta1(self):
beta1ci = self.res1.conf_int_el(1, method='nm')
assert_almost_equal(beta1ci, self.res2.test_ci_beta1, 6)
@pytest.mark.slow
def test_ci_beta2(self):
beta2ci = self.res1.conf_int_el(2, lower_bound=.59, upper_bound=2.2,
method='nm')
assert_almost_equal(beta2ci, self.res2.test_ci_beta2, 6)
@pytest.mark.slow
def test_ci_beta3(self):
beta3ci = self.res1.conf_int_el(3, method='nm')
assert_almost_equal(beta3ci, self.res2.test_ci_beta3, 6)
| bsd-3-clause |
roxyboy/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
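    """Draw a translucent covariance ellipse (first two dimensions) for each
    GMM component on the given axes, colored to match its class."""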
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
hainm/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
WangWenjun559/Weiss | summary/sumy/sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and can let any indexable data structure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
    # check that passing a negative number of packs (n_packs) raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| apache-2.0 |
rishikksh20/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 94 | 10801 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
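    """Build an ill-posed sparse regression problem: only the first
    n_informative features influence y and roughly half of the entries
    of X are zeroed out."""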
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
def test_same_multiple_output_sparse_dense():
for normalize in [True, False]:
l = ElasticNet(normalize=normalize)
X = [[0, 1, 2, 3, 4],
[0, 2, 5, 8, 11],
[9, 10, 11, 12, 13],
[10, 11, 12, 13, 14]]
y = [[1, 2, 3, 4, 5],
[1, 3, 6, 9, 12],
[10, 11, 12, 13, 14],
[11, 12, 13, 14, 15]]
ignore_warnings(l.fit)(X, y)
sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
predict_dense = l.predict(sample)
l_sp = ElasticNet(normalize=normalize)
X_sp = sp.coo_matrix(X)
ignore_warnings(l_sp.fit)(X_sp, y)
sample_sparse = sp.coo_matrix(sample)
predict_sparse = l_sp.predict(sample_sparse)
assert_array_almost_equal(predict_sparse, predict_dense)
| bsd-3-clause |
bm2-lab/cage | src/core/seqfs/seqfeature_extractor.py | 1 | 3289 | from __future__ import division
import os
from cStringIO import StringIO
from pyfasta import Fasta
import pandas as pd
def __SeqRc(str_seq):
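    """Return the reverse complement of a DNA sequence (A/C/G/T/N)."""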
lst_rcseq = [c for c in str_seq[::-1]]
for i in xrange(len(lst_rcseq)):
if lst_rcseq[i] == 'A':
lst_rcseq[i] = 'T'
elif lst_rcseq[i] == 'T':
lst_rcseq[i] = 'A'
elif lst_rcseq[i] == 'C':
lst_rcseq[i] = 'G'
elif lst_rcseq[i] == 'G':
lst_rcseq[i] = 'C'
elif lst_rcseq[i] == 'N':
lst_rcseq[i] = 'N'
return ''.join(lst_rcseq)
def __CalculateGC(str_seq):
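    """Return the GC fraction of a sequence."""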
int_gc = 0
for c in str_seq:
if c in ('G', 'C'):
int_gc += 1
flt_gc = int_gc / len(str_seq)
return flt_gc
def __EncodeSeq(str_seq, dict_code):
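    """One-hot encode a sequence using the per-base code in dict_code."""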
lst_code = []
for c in str_seq:
lst_code.extend(dict_code[c])
return lst_code
def __FormulateFeature(lst_sg, fa_ref, dict_code, int_ups, int_dws):
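    """Build one feature row: the sgID followed by one-hot encodings of the
    upstream flank, the sgRNA sequence itself and the downstream flank,
    with the flanks taken from the reference genome according to strand."""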
idx_sgid = 0
idx_chr = 1
idx_strand = 2
idx_beg = 3
idx_end = 4
idx_seq = 5
lst_seq = [lst_sg[idx_sgid]]
    str_ups = ''
str_dws = ''
if lst_sg[idx_strand] == '-':
int_bups = int(lst_sg[idx_beg]) + int_ups
int_eups = int(lst_sg[idx_beg]) + 1
str_ups = fa_ref[lst_sg[idx_chr]][int_eups - 1:int_bups].upper()
str_ups = __SeqRc(str_ups)
int_bdws = int(lst_sg[idx_end]) - 1
int_edws = int(lst_sg[idx_end]) - int_dws
str_dws = fa_ref[lst_sg[idx_chr]][int_edws - 1:int_bdws].upper()
str_dws = __SeqRc(str_dws)
else:
int_bups = int(lst_sg[idx_beg]) - int_ups
int_eups = int(lst_sg[idx_beg]) - 1
str_ups = fa_ref[lst_sg[idx_chr]][int_bups - 1:int_eups].upper()
int_bdws = int(lst_sg[idx_end]) + 1
int_edws = int(lst_sg[idx_end]) + int_dws
str_dws = fa_ref[lst_sg[idx_chr]][int_bdws - 1:int_edws].upper()
lst_seq.extend(__EncodeSeq(str_ups, dict_code))
lst_seq.extend(__EncodeSeq(lst_sg[idx_seq], dict_code))
lst_seq.extend(__EncodeSeq(str_dws, dict_code))
return lst_seq
def ExtractSeqFeature(str_f_sg, str_refgem, int_ups, int_dws):
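    """Extract per-sgRNA sequence features (one-hot ups_*/spa_*/pam_*/dws_*
    columns) from the reference genome and return them as a pandas DataFrame."""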
f_sg = open(str_f_sg, 'r')
gn_sg = (str_sg.strip().split('\t') for str_sg in f_sg if str_sg.strip() != '')
strio_seq = StringIO()
str_refpath = '%s/%s.fa'% (os.environ['FASTADB'], str_refgem)
fa_ref = Fasta(str_refpath)
dict_code = dict(N=['0','0','0','0'], A=['1','0','0','0'], C=['0','1','0','0'], G=['0','0','1','0'], T=['0','0','0','1'])
lst_code = ['A', 'C', 'G', 'T']
lst_seq = []
lst_header = ['sgID']
lst_header.extend(['ups_%d_%s'% (i, j) for i in range(int_ups, 0, -1) for j in lst_code])
lst_header.extend(['spa_%d_%s'% (i, j) for i in range(1, 21) for j in lst_code])
lst_header.extend(['pam_%d_%s'% (i, j) for i in range(1, 4) for j in lst_code])
lst_header.extend(['dws_%d_%s'% (i, j) for i in range(1, int_dws+1) for j in lst_code])
for lst_sg in gn_sg:
lst_seq = __FormulateFeature(lst_sg, fa_ref, dict_code, int_ups, int_dws)
strio_seq.write('\t'.join(lst_seq) + '\n')
dfm_seq = pd.read_csv(StringIO(strio_seq.getvalue()), header=None, sep='\t', index_col=None)
dfm_seq.columns = lst_header
strio_seq.close()
return dfm_seq
| mit |
jblackburne/scikit-learn | examples/classification/plot_lda_qda.py | 30 | 5150 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
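    """Scatter correct (dots) and misclassified (stars) samples for both classes,
    shade the class-1 probability field with its 0.5 decision boundary and mark
    the class means."""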
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
alpha = 0.5
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', alpha=alpha,
color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '*', alpha=alpha,
color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', alpha=alpha,
color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '*', alpha=alpha,
color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, facecolor=color, edgecolor='yellow',
linewidth=2, zorder=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis(store_covariances=True)
y_pred = qda.fit(X, y).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
subutai/nupic.research | nupic/research/frameworks/dendrites/mixins/sp_context_analysis.py | 2 | 5162 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import abc
import numpy as np
import torch
from sklearn.metrics import pairwise_distances
def dot_product_metric(x, y):
return x.dot(y)
class SpatialPoolerAnalysis(metaclass=abc.ABCMeta):
"""
Analyze the representations coming out of an untrained SP as possible context
vectors.
"""
def setup_experiment(self, config):
model_args = config.get("model_args")
self.dim_context = model_args.get("output_size")
super().setup_experiment(config)
# We're not going to train anything, but need this for k-winner duty
# cycle
self.model.train()
# Tensor for accumulating each task's centroid vector
self.contexts = torch.zeros((0, self.dim_context))
self.tasks = []
# We allow any metric specified in sklearn.metrics.pairwise_distances, plus
# dot product.
self.distance_metric = config.get("distance_metric", "dot")
def train_epoch(self):
"""Don't train anything"""
pass
def should_stop(self):
"""Stop after the first task."""
return self.current_task > 0
def validate(self, loader=None):
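        """Collect SP outputs for up to ~1000 samples of each task, then return
        the k-winner entropy and the inter/intra-task separation of those
        outputs."""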
if loader is None:
loader = self.val_loader
contexts = torch.zeros((0, self.dim_context))
tasks = []
for task in range(self.num_tasks):
num_samples = 0
loader.sampler.set_active_tasks(task)
with torch.no_grad():
for data, _ in loader:
if isinstance(data, list):
data, context = data
data = data.flatten(start_dim=1)
data = data.to(self.device)
output = self.model(data)
self.optimizer.zero_grad()
num_samples += len(data)
contexts = torch.cat((contexts, output))
tasks.extend([task] * len(data))
if num_samples >= 1000:
break
self.tasks = np.array(tasks)
self.contexts = contexts.numpy()
print("Numpy contexts, tasks:",
self.contexts.shape, self.tasks.shape)
print("Duty cycle mean/min/max: ",
self.model.kw.duty_cycle.mean(), self.model.kw.duty_cycle.min(),
self.model.kw.duty_cycle.max())
separation = self.compute_distances()
entropy = float(self.model.kw.entropy())
return dict(entropy=entropy, mean_accuracy=separation)
def compute_distances(self):
"""
Compute the within-task distances and the across task distances of the
SP outputs. The method returns the 'separation', defined as the ratio
between the mean inter-class and intra-class distances. The higher this
number, the more separated the context vectors are.
"""
metric = self.distance_metric
if self.distance_metric == "dot":
metric = dot_product_metric
avg_dist = np.zeros((self.num_tasks, self.num_tasks))
stdev_dist = np.zeros((self.num_tasks, self.num_tasks))
for i in range(self.num_tasks):
for j in range(self.num_tasks):
distances = pairwise_distances(
self.contexts[self.tasks == i], self.contexts[self.tasks == j],
metric=metric)
avg_dist[i, j] = distances.mean()
stdev_dist[i, j] = distances.std()
# avg_dist /= avg_dist.max()
print("Distance matrix using metric:", self.distance_metric)
print(avg_dist)
diag_sum = np.trace(avg_dist)
diag_mean = diag_sum / avg_dist.shape[0]
num_off_diag_elements = avg_dist.size - avg_dist.shape[0]
off_diag_mean = (np.sum(avg_dist) - diag_sum) / num_off_diag_elements
if self.distance_metric == "dot":
# Want intra-class dot products to be higher than inter-class dot products
separation = diag_mean / off_diag_mean
else:
separation = off_diag_mean / diag_mean
print("Distances:", diag_mean, off_diag_mean, separation, self.contexts.size)
return separation
| agpl-3.0 |
franblas/sentiTweets | sentimentanalysis.py | 1 | 4158 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 05 09:09:09 2015
@author: Paco
"""
import pandas as pd
import nltk as nltk
from nltk.corpus import wordnet as wn
from sentiwordnet import SentiWordNetCorpusReader
from negmod import NegMod
#data import
data = pd.read_csv('testdata.manual.2009.06.14.csv',names=['popularity','id','date','request','user','text'])
dicoslang = pd.read_table('SlangLookupTable.txt',names=['abrev','expr'])
'''
Remove punctuation from a text
'''
def remove_ponctuation(text):
temp = text
ponctuation = [',',';','?','!',':','.','^','*','-','(',')','/','=','_','[',']','{','}','"']
for p in ponctuation:
temp = temp.replace(p,'')
temp = temp.replace("'",'')
return temp
def is_url(text):
if text.startswith('http'):
return True
else:
return False
def is_hashtag(text):
if text.startswith('#'):
return True
else:
return False
def is_tweeter_user(text):
if text.startswith('@'):
return True
else:
return False
def is_retweet(text):
if text.lower() == 'rt':
return True
else:
return False
def is_empty(text):
if text == '':
return True
else:
return False
def is_special_chara(text):
if text.startswith('&') or text.startswith('$'):
return True
else:
return False
def replace_abrev(text,tokens):
temp = dicoslang['abrev'].tolist()
for ab in temp:
if text.lower() == ab:
exp = dicoslang['expr'][temp.index(ab)]
exp = exp.replace("'",' ')
arr = exp.split(' ')
indice = tokens.index(text)
for a in reversed(arr):
tokens.insert(indice+1,a)
tokens.pop(indice)
return tokens
def drop_stuff(tokens):
temp = tokens
for t in tokens:
t.replace('&','')
if (is_retweet(t) or is_special_chara(t) or is_url(t) or is_hashtag(t) or is_tweeter_user(t) or is_empty(t)):
temp[temp.index(t)]=''
temp = replace_abrev(t,temp)
temp = filter(lambda a: a != '', temp)
return temp
#Algo 1
def preProcessed(data):
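    '''Tokenize each tweet, strip punctuation, expand slang abbreviations and
    drop URLs, hashtags, mentions and retweet markers.'''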
temp = data['text'].tolist()
res = list()
for text in temp:
#remove ponctuation
text_1 = remove_ponctuation(text)
#tokenize
tokens = text_1.split(' ')
#clean text
tokens_1 = drop_stuff(tokens)
#add it to final list
res.append(tokens_1)
return res
# Algo 2
def etiqGrama(preProData):
res = list()
for d in preProData:
taggedData = nltk.pos_tag(d)
res.append(taggedData)
return res
# Algo 3
def scores(preProData,emot,sentifile='SentiWordNet_3.0.0_20130122.txt'):
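    '''Score each pre-processed tweet by summing SentiWordNet positive/negative
    scores per word, handling negation and intensity modifiers, then adding the
    emoticon scores passed in emot.'''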
swn = SentiWordNetCorpusReader(sentifile)
res = list()
bar = 0.0
nm = NegMod()
for tweet,emo in zip(preProData,emot):
print bar / float(len(preProData))
tweetneg = 0.0
tweetpos = 0.0
c = 0
for word in tweet:
try:
w = str(wn.synsets(word)[0].name())
temp = swn.senti_synset(w)
plop = 0.0
plopp = 0.0
                # Negation and modifiers
if c != 0:
if nm.neg_it(tweet[c-1]):#negation
tweetpos = temp[2]
tweetneg = temp[1]
break
if nm.mod_multiply(tweet[c-1]):#modifier
plop = temp[1]*2
plopp = temp[2]*2
else:
plop = temp[1]
plopp = temp[2]
else:
plop = temp[1]
plopp = temp[2]
tweetpos = tweetpos + plop
tweetneg = tweetneg + plopp
except:
pass
c = c + 1
# Add emot feeling
tweetpos = tweetpos + emo[0]
tweetneg = tweetneg + emo[1]
res.append((tweetpos,tweetneg))
bar = bar + 1.0
    return res
| mit |
EtienneCmb/tensorpac | examples/pac/plot_compare_surrogates.py | 1 | 3213 | """
====================================================
Compare methods to correct PAC for spurious coupling
====================================================
This example illustrates the different methods implemented to generate the
distribution of surrogates and then to correct the PAC for spurious couplings.
This includes:
* Swapping phase / amplitude trials (Tort et al. 2010,
:cite:`tort2010measuring`)
* Swapping amplitudes time blocks [RECOMMENDED] (Bahramisharif et al. 2013,
:cite:`bahramisharif2013propagating`, Aru et al. 2015,
:cite:`aru2015untangling`)
* Introducing a time lag on phase series (Canolty et al. 2006,
:cite:`canolty2006high`)
"""
import matplotlib.pyplot as plt
from tensorpac import Pac
from tensorpac.signals import pac_signals_wavelet
###############################################################################
# Simulate artificial coupling
###############################################################################
# first, we generate several trials that contain a coupling between a 6 Hz phase
# and a 70 Hz amplitude. By default, the returned dataset is organized as
# (n_epochs, n_times) where n_times is the number of time points and n_epochs
# is the number of trials
f_pha = 6 # frequency phase for the coupling
f_amp = 70 # frequency amplitude for the coupling
n_epochs = 20 # number of trials
n_times = 4000 # number of time points
sf = 512. # sampling frequency
data, time = pac_signals_wavelet(sf=sf, f_pha=f_pha, f_amp=f_amp, noise=3.,
n_epochs=n_epochs, n_times=n_times)
###############################################################################
# Extract phases and amplitudes
###############################################################################
# now, we are going to extract all the phases and amplitudes. This is useful
# because it avoids re-filtering the data each time we compute the PAC.
# define a :class:`tensorpac.Pac` object and use the MVL as the main method
# for measuring PAC
p = Pac(idpac=(1, 0, 0), f_pha=(3, 10, 1, .2), f_amp=(50, 90, 5, 1),
dcomplex='wavelet', width=12)
# Now, extract all of the phases and amplitudes
phases = p.filter(sf, data, ftype='phase')
amplitudes = p.filter(sf, data, ftype='amplitude')
###############################################################################
# Compute PAC and surrogates
###############################################################################
# now that the phases and amplitudes are extracted, we can compute the true PAC
# as well as the surrogates. The true PAC value is then normalized (z-score)
# using the distribution of surrogates
plt.figure(figsize=(16, 12))
for i, k in enumerate(range(4)):
# change the pac method
p.idpac = (5, k, 1)
# compute only the pac without filtering
xpac = p.fit(phases, amplitudes, n_perm=20)
# plot
title = p.str_surro.replace(' (', '\n(')
plt.subplot(2, 2, k + 1)
p.comodulogram(xpac.mean(-1), title=title, cmap='Reds', vmin=0,
fz_labels=18, fz_title=20, fz_cblabel=18)
plt.tight_layout()
plt.show()
| bsd-3-clause |
jardians/sp17-i524 | project/S17-IO-3012/code/bin/benchmark_shards_import.py | 19 | 5376 | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
"""retrieves mandatory parameter to program
@param: none
@type: n/a
"""
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
"""reads a file into a pandas dataframe
@param: filename The name of the file to read
@type: string
"""
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
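    """filters the benchmark dataframe to one cluster configuration ('X' = wildcard)
    and returns the mean runtime per number of shards
    """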
benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
if cloud != 'X':
benchmark_df = benchmark_df[benchmark_df.cloud == cloud]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
#benchmark_df1 = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica']).mean()
#http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
benchmark_df = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
#http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
#print benchmark_df1['shard_replicas']
#print benchmark_df1
#print benchmark_df
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(find_seconds_kilo, shards_kilo, find_seconds_chameleon, shards_chameleon, find_seconds_jetstream, shards_jetstream):
"""formats and creates a line chart
@param1: find_seconds_kilo Array with find_seconds from kilo
@type: numpy array
@param2: shards_kilo Array with shards from kilo
@type: numpy array
@param3: find_seconds_chameleon Array with find_seconds from chameleon
@type: numpy array
    @param4: shards_chameleon Array with shards from chameleon
    @type: numpy array
    @param5: find_seconds_jetstream Array with find_seconds from jetstream
    @type: numpy array
    @param6: shards_jetstream Array with shards from jetstream
    @type: numpy array
    """
fig = plt.figure()
#plt.title('Average MongoImport Runtime with Various Numbers of Shards')
plt.ylabel('Runtime in Seconds')
plt.xlabel('Number of Shards')
# Make the chart
plt.plot(shards_kilo, find_seconds_kilo, label='Kilo Cloud')
plt.plot(shards_chameleon, find_seconds_chameleon, label='Chameleon Cloud')
plt.plot(shards_jetstream, find_seconds_jetstream, label='Jetstream Cloud')
#http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib
plt.ylim(ymin=0)
plt.legend(loc='best')
# Show the chart (for testing)
# plt.show()
# Save the chart
fig.savefig('../report/shard_import.png')
# Run the program by calling the functions
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
cloud = 'kilo'
config_replicas = 1
mongos_instances = 1
shard_replicas = 'X'
shards_per_replica = 1
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
#percentage death=\
find_seconds_kilo=select_df.as_matrix(columns=[select_df.columns[6]])
shards_kilo = select_df.as_matrix(columns=[select_df.columns[3]])
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'chameleon'
config_replicas = 1
mongos_instances = 1
shard_replicas = 'X'
shards_per_replica = 1
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
find_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[6]])
shards_chameleon = select_df.as_matrix(columns=[select_df.columns[3]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'jetstream'
config_replicas = 1
mongos_instances = 1
shard_replicas = 'X'
shards_per_replica = 1
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
find_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[6]])
shards_jetstream = select_df.as_matrix(columns=[select_df.columns[3]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
make_figure(find_seconds_kilo, shards_kilo, find_seconds_chameleon, shards_chameleon, find_seconds_jetstream, shards_jetstream)
| apache-2.0 |
htimko/ArcPIC | pic2d/tests/plotPicFieldTest.py | 1 | 12282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# plotPicFieldTest.py
#
# Plots the PIC and direct field as well as their difference
# in r=z=0 as a function of charge position
#
# Kyrre Sjøbæk, 2013
#
import sys, os, shutil
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
#from matplotlib.ticker import LogFormatter
#from matplotlib import gridspec
from matplotlib import rcParams
rcParams.update({'text.usetex': True})
print "Usage: {g|ng} {mirrorOrders=y|n} {ZR=z|r} (maxZidx|None) (hlineValue)"
print
if not (4 <= len(sys.argv) <= 6):
exit(1)
#Get the scaling setup
MODLOAD_parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,MODLOAD_parentdir)
from calcScaling import InputFile
inputfile = InputFile("../input.txt")
inputfile.calcBasicParams()
print
rFactor = 1e4*inputfile.dZ #output units -> um
idxList = []
izList = []
irList = []
rList = []
zList = []
fieldPICList = []
fieldDirectList = []
fieldDirect_orderList = []
useGrid = sys.argv[1]
if useGrid == 'g':
useGrid = True
elif useGrid == 'ng':
useGrid = False
else:
print "argument 1 (useGrid) should be 'g' or 'ng'"
exit(1)
mirrorOrders = sys.argv[2]
if mirrorOrders == 'y':
mirrorOrders = True
elif mirrorOrders == 'n':
mirrorOrders = False
else:
print "argument 2 (mirrorOrders) should be 'y' or 'n'"
exit(1)
ZR = sys.argv[3]
if not (ZR == 'z' or ZR == 'r'):
print "Argument 3 (ZR) should be 'z' or 'r', got '" + ZR + "'"
exit(1)
maxZidx = None
if len(sys.argv) == 5 or len(sys.argv) == 6:
if sys.argv[4] != "None":
maxZidx = int(sys.argv[4])
print "Using maxZidx =", maxZidx
print
hlineValue = None
if len(sys.argv) == 6:
hlineValue = float(sys.argv[5])
print "Using hlineValue =", hlineValue
print
picFieldTestFile = open("picFieldTest.dat", 'r')
picFieldTestFile.readline() #skip header
header2 = picFieldTestFile.readline()
h2 = header2.split()
assert len(h2) == 7
nzMax = int(h2[1].split("=")[1])
nrMax = int(h2[2].split("=")[1])
pointsPerGrid = int(h2[3].split("=")[1])
mirror_order = int(h2[4].split("=")[1])
zWitness = float(h2[5].split("=")[1])
rWitness = float(h2[6].split("=")[1])
for line in picFieldTestFile:
l = line.split()
if len(l) == 0:
continue
if maxZidx != None and int(l[1]) >= maxZidx:
print "stop"
break;
idxList.append( int(l[0]) )
izList.append( int(l[1]) )
irList.append( int(l[2]) )
zList.append( float(l[3]) )
rList.append( float(l[4]) )
if ZR == 'z':
fieldPICList.append( float(l[5]) )
elif ZR == 'r':
fieldPICList.append( float(l[6]) )
fieldDirectList.append( float(l[7]) )
fieldDirect_order = l[8:]
fieldDirect_orderList.append(map(float,fieldDirect_order))
picFieldTestFile.close()
print "Max iz =", max(izList), "(pass this-1 as argument maxZidx if crash)"
print
idxList = np.asarray(idxList)
izList = np.asarray(izList)
irList = np.asarray(irList)
zList = np.asarray(zList)
rList = np.asarray(rList)
fieldPICList = np.asarray(fieldPICList)
fieldDirectList = np.asarray(fieldDirectList)
fieldDirect_orderList = np.asarray(fieldDirect_orderList)
zMat = np.reshape(zList, (-1,nrMax*pointsPerGrid))
rMat = np.reshape(rList, (-1,nrMax*pointsPerGrid))
fieldPICMat = np.reshape(fieldPICList, (-1, nrMax*pointsPerGrid))
fieldDirectMat = np.reshape(fieldDirectList, (-1, nrMax*pointsPerGrid))
def plotGrid():
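    # Overlay the computational grid as dashed lines and mark the witness point
    # (only when grid plotting is enabled via the 'g' argument).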
if not useGrid:
return
for ir in xrange(1, int(max(rList))+1 ):
plt.axhline(ir, color='k', ls='--')
for iz in xrange(1, int(max(zList))+1 ):
plt.axvline(iz, color='k', ls='--')
plt.plot(zWitness,rWitness, marker='*', ms=30)
fieldMax = max(np.nanmax(fieldPICMat), np.nanmax(fieldDirectMat))
fieldLevels_maxExp = int(np.log10(fieldMax))+1
fieldLevels_steps = fieldLevels_maxExp + 4 #starts at -3, all ints
fieldLevels_steps += fieldLevels_steps - 1 #add half-steps
fieldLevels = np.logspace(-3, fieldLevels_maxExp, fieldLevels_steps);
print "FieldLevels = "
print fieldLevels
print
fieldNegMax = max(np.nanmax(-fieldPICMat), np.nanmax(-fieldDirectMat))
fieldLevels_negMaxExp = int(np.log10(fieldNegMax))+1
fieldLevels_negSteps = fieldLevels_negMaxExp + 4 #starts at -3, all ints incl. 0
fieldLevels_negSteps += fieldLevels_negSteps - 1 #add half-steps
fieldLevels_neg = np.logspace(-3, fieldLevels_negMaxExp, fieldLevels_negSteps);
print "FieldLevels_neg = "
print fieldLevels_neg
print
plt.figure(1)
plt.contourf(zMat,rMat,fieldPICMat, 20);
plt.colorbar()
if hlineValue:
plt.axhline(hlineValue, color='k')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
plt.title("PIC field [V/m]")
plotGrid()
plt.figure(2)
CS1 = plt.contourf(zMat,rMat,fieldPICMat,
levels=fieldLevels, norm=LogNorm());
plt.colorbar(CS1)
CS2 = plt.contourf(zMat,rMat,-fieldPICMat,
levels=fieldLevels_neg, norm=LogNorm(), hatches='x');
plt.colorbar(CS2)
if hlineValue:
plt.axhline(hlineValue, color='k')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
plt.title("PIC field [V/m]")
plotGrid()
plt.figure(3)
plt.contourf(zMat,rMat,fieldDirectMat);
plt.colorbar()
if hlineValue:
plt.axhline(hlineValue, color='k')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
plt.title("Analytical field [V/m]")
plotGrid()
plt.figure(4)
CS1 = plt.contourf(zMat,rMat,fieldDirectMat,
levels=fieldLevels, norm=LogNorm());
plt.colorbar(CS1)
CS2 = plt.contourf(zMat,rMat,-fieldDirectMat,
levels=fieldLevels_neg, norm=LogNorm(), hatches='x');
plt.colorbar(CS2)
if hlineValue:
plt.axhline(hlineValue, color='k')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
plt.title("Analytical field [V/m]")
plotGrid()
deltaFieldMat = fieldPICMat - fieldDirectMat
plt.figure(5)
plt.contourf(zMat,rMat,deltaFieldMat);
plt.colorbar()
if hlineValue:
plt.axhline(hlineValue, color='k')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
plt.title("PIC - analytical field [V/m]")
plotGrid()
plt.figure(6)
dfmLevels_max = np.nanmax(abs(deltaFieldMat))
dfmLevels_maxExp = int(np.log10(dfmLevels_max))+1
dfmLevels_steps = dfmLevels_maxExp + 4 #starts at -3, all ints
dfmLevels_steps += dfmLevels_steps - 1 #add half-steps
dfmLevels = np.logspace(-3, dfmLevels_maxExp, dfmLevels_steps);
print "dfmLevels = "
print dfmLevels
print
plt.contourf(zMat,rMat,-deltaFieldMat,
levels=dfmLevels,
norm=LogNorm(),
hatches="x");
plt.contourf(zMat,rMat,deltaFieldMat,
levels=dfmLevels,
norm=LogNorm());
if hlineValue:
plt.axhline(hlineValue, color='k')
#plt.contourf(zMat,rMat,abs(deltaFieldMat), 20, norm=LogNorm());
plt.colorbar()
#plt.contour(zMat,rMat,deltaFieldMat, [0.0], norm=LogNorm());
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
plt.title("PIC - analytical field [V/m]\n(PIC underestimated region hatched)")
plotGrid()
relDeltaFieldMat = deltaFieldMat / abs(fieldDirectMat)
plt.figure(7)
#relDeltaFieldMat_levels = np.linspace(min(relDeltaFieldMat),max(relDeltaFieldMat), 10)
#relDeltaFieldMat_levels = np.arange(-1.0,1.1, 0.1)
relDeltaFieldMat_levels = (-1.0,-0.8,-0.6,-0.4,-0.2,-0.1,-0.05, 0.0, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0)
print "relDeltaFieldMat_levels:"
print relDeltaFieldMat_levels
print
CS1 = plt.contourf(zMat,rMat,relDeltaFieldMat, relDeltaFieldMat_levels,extend='both');
plt.colorbar(CS1)
CS2 = plt.contour(zMat,rMat,relDeltaFieldMat, [-0.1,-0.05,0.0,0.05,0.1], colors='k');
plt.clabel(CS2,[-0.1,0.0,0.1])
if hlineValue:
plt.axhline(hlineValue, color='k')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
plt.title("(PIC - analytical field) / abs(analytical field) [V/m]\nBlack lines drawn at 0.0, $\pm$0.05 and $\pm$0.1")
plotGrid()
# plt.figure(8)
# plt.plot(zList,rList, 'b.')
# plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
# plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
# plotGrid()
def extractRslice(rWanted):
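    # Return (z, PIC field, analytical field) arrays for the points lying
    # exactly at r == rWanted.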
indexList_wanted = []
for i in xrange(len(rList)):
if rList[i] == rWanted: #float stored and retrived without any operations in between
indexList_wanted.append(i)
zList_wanted = zList[indexList_wanted]
fieldPICList_wanted = fieldPICList[indexList_wanted]
fieldDirectList_wanted = fieldDirectList[indexList_wanted]
return (zList_wanted, fieldPICList_wanted, fieldDirectList_wanted)
RValues = sorted(list(set(rList)))
print "RValues = ", RValues
plt.figure(9)
print "Making linear plot..."
for r in RValues:
(zList_axis, fieldPICList_axis, fieldDirectList_axis) = extractRslice(r);
plt.plot(zList_axis, fieldPICList_axis, 'r-', label="PIC field")
plt.plot(zList_axis, fieldDirectList_axis, 'r--', label="Analytical field")
if useGrid:
for iz in xrange(1,int(max(zList_axis))+1):
print iz
plt.axvline(iz,color='k', ls='--')
plt.axvline(zWitness, color = 'g')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("Field [V/m]")
plt.figure(10)
print "Making log plot..."
for r in RValues:
(zList_axis, fieldPICList_axis, fieldDirectList_axis) = extractRslice(r);
plt.semilogy(zList_axis, fieldPICList_axis, 'r-')
plt.semilogy(zList_axis, -fieldPICList_axis, 'b-')
plt.semilogy(zList_axis, fieldDirectList_axis, 'r--')
plt.semilogy(zList_axis, -fieldDirectList_axis, 'b--')
if useGrid:
for iz in xrange(1,int(max(zList_axis))+1):
plt.axvline(iz,color='k', ls='--')
plt.axvline(zWitness, color = 'g')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("Field [V/m]")
print "done."
figureOffset = 11
order_last = fieldDirect_orderList[:,-1]
order_last = np.reshape(order_last,(-1, nrMax*pointsPerGrid))
order_fieldMax = np.nanmax(fieldDirect_orderList);
order_fieldNegMax = np.nanmax(-fieldDirect_orderList);
fieldLevels_posMaxExp = int(np.log10(order_fieldMax))+1
 fieldLevels_posSteps = fieldLevels_posMaxExp + 4 #starts at -3, all ints
fieldLevels_posSteps += fieldLevels_posSteps - 1 #add half-steps
fieldLevels_pos = np.logspace(-3, fieldLevels_posMaxExp, fieldLevels_posSteps);
print "FieldLevels_pos = "
print fieldLevels_pos
print
fieldLevels_negMaxExp = int(np.log10(order_fieldNegMax))+1
fieldLevels_negSteps = fieldLevels_negMaxExp + 4 #starts at -3, all ints incl. 0
fieldLevels_negSteps += fieldLevels_negSteps - 1 #add half-steps
fieldLevels_neg = np.logspace(-3, fieldLevels_negMaxExp, fieldLevels_negSteps);
print "FieldLevels_neg = "
print fieldLevels_neg
print
if mirrorOrders:
for o in xrange(mirror_order+1):
order_this = fieldDirect_orderList[:,o]
order_this = np.reshape(order_this,(-1, nrMax*pointsPerGrid))
order_relDiff = (order_this-order_last) / abs(order_last)
plt.figure(figureOffset + o)
CS1 = plt.contourf(zMat,rMat, order_this,
norm=LogNorm(), levels=fieldLevels_pos)
plt.colorbar(CS1)
CS2 = plt.contourf(zMat,rMat, -order_this, hatches='x',
norm=LogNorm(), levels=fieldLevels_neg)
plt.colorbar(CS2)
if hlineValue:
plt.axhline(hlineValue, color='k')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
plt.title("Analytical field, order " + str(o));
if o == mirror_order:
continue;
plt.figure(figureOffset+o+mirror_order+2)
plt.contourf(zMat,rMat, np.abs(order_relDiff), norm=LogNorm())
plt.colorbar()
if hlineValue:
plt.axhline(hlineValue, color='k')
plt.xlabel("z [dz=%f um]" % (inputfile.dZ*1e4,))
plt.ylabel("r [dz=%f um]" % (inputfile.dZ*1e4,))
plt.title("abs(order %i - order %i) / abs(order %i)" % (o, mirror_order, mirror_order))
plt.show()
| gpl-3.0 |
kashif/scikit-learn | examples/ensemble/plot_feature_transformation.py | 115 | 4327 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the logistic regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
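# Supervised transformation based on gradient boosted trees.
# Note (added for clarity): grd.apply(X) returns leaf indices with shape
# (n_samples, n_estimators, 1) for binary problems, hence the [:, :, 0] below.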
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
wjlei1990/EarlyWarning | src/generate_measurements.py | 1 | 7779 | """
1) use the arrival time to generate windows
2) use the window and seismogram to generate measurements(inside windows)
"""
from __future__ import print_function, division
import os
import sys # NOQA
import scipy.stats
import numpy as np
import pandas as pd
import obspy
from obspy import UTCDateTime
import matplotlib.pyplot as plt
from utils import load_json
from metric import cut_window, tau_c, tau_p_max, \
make_disp_vel_acc_records
from metric2 import norm, envelope, split_accumul_data
def read_waveform(fn):
st = obspy.read(fn, format="MSEED")
return st
def measure_func(data, prefix):
# plt.plot(data)
# plt.xlabel(prefix)
# plt.show()
measure = {}
npts = len(data)
measure["%s.l1_norm" % prefix] = norm(data, ord=1) / npts
measure["%s.abs_l1_norm" % prefix] = norm(np.abs(data), ord=1) / npts
measure["%s.l2_norm" % prefix] = norm(data, ord=2) / npts
measure["%s.l4_norm" % prefix] = norm(data, ord=4) / npts
max_amp = np.max(np.abs(data))
measure["%s.max_amp" % prefix] = max_amp
max_amp_loc = np.argmax(np.abs(data)) / npts
measure["%s.max_amp_loc" % prefix] = max_amp_loc
measure["%s.max_amp_over_loc" % prefix] = max_amp / max_amp_loc
_, _, mean, var, skew, kurt = scipy.stats.describe(np.abs(data))
measure["%s.mean" % prefix] = mean
measure["%s.var" % prefix] = var
measure["%s.skew" % prefix] = skew
measure["%s.kurt" % prefix] = kurt
for perc in [25, 50, 75]:
measure["%s.%d_perc" % (prefix, perc)] = \
np.percentile(np.abs(data), perc)
return measure
def measure_on_trace_data_type(trace, data_type, window_split):
measure = {}
channel = trace.stats.channel
prefix = "%s.%s" % (channel, data_type)
measure.update(measure_func(trace.data, prefix))
data_split = split_accumul_data(trace.data, n=window_split)
for idx in range(len(data_split)):
prefix = "%s.%s.acumul_window_%d" % (channel, data_type, idx)
measure.update(measure_func(data_split[idx], prefix))
env_data = envelope(trace.data)
prefix = "%s.%s.env" % (channel, data_type)
measure.update(measure_func(env_data, prefix))
env_split = split_accumul_data(env_data, n=window_split)
for idx in range(len(data_split)):
prefix = "%s.%s.env.window_%d" % (channel, data_type, idx)
measure.update(measure_func(env_split[idx], prefix))
return measure
def plot_arrival_window(trace, windows, origin_time):
plt.plot(trace.data)
ax = plt.gca()
ymin, ymax = ax.get_ylim()
idx = (UTCDateTime(windows["pick_arrival"]) - trace.stats.starttime) / \
trace.stats.delta
plt.vlines([idx], ymin, ymax, linestyles="dotted", color='r')
idx = (UTCDateTime(windows["theo_arrival"]) - trace.stats.starttime) / \
trace.stats.delta
plt.vlines([idx], ymin, ymax, linestyles="dotted", color='b')
idx = (origin_time - trace.stats.starttime) / \
trace.stats.delta
plt.vlines([idx], ymin, ymax, linestyles="dotted", color='b')
plt.show()
def measure_tau_c(trace_types, window_split):
channel = trace_types["disp"].stats.channel
measure = {}
measure["%s.tau_c" % channel] = \
tau_c(trace_types["disp"].data, trace_types["vel"].data)
disp_split = split_accumul_data(trace_types["disp"].data, n=window_split)
vel_split = split_accumul_data(trace_types["vel"].data, n=window_split)
for idx in range(len(disp_split)):
measure["%s.tau_c.accumul_window_%d" % (channel, idx)] = \
tau_c(disp_split[idx], vel_split[idx])
return measure
def measure_on_trace(_trace, windows, src, win_len=3.0, window_split=6):
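    # Summary (added for clarity): cut a win_len-second window starting at the
    # picked arrival, then compute tau_p_max, tau_c and the statistical
    # features on the displacement, velocity and acceleration records.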
arrival = UTCDateTime(windows["pick_arrival"])
# plot_arrival_window(_trace, windows, UTCDateTime(src.time))
trace = cut_window(_trace, arrival, win_len)
print(trace)
measure = {}
_v = tau_p_max(trace)
channel = trace.stats.channel
measure["%s.tau_p_max" % channel] = _v["tau_p_max"]
trace_types = make_disp_vel_acc_records(trace)
measure.update(measure_tau_c(trace_types, window_split))
for dtype, data in trace_types.iteritems():
measure.update(measure_on_trace_data_type(data, dtype, window_split))
# print(measure)
# print("Number of features: %d" % len(measure))
# print(measure.keys())
return measure
def select_station_components(st, zchan):
"""
Select the 3-component traces from the same channel
"""
comps = ["Z", "N", "E"]
st_select = obspy.Stream()
for comp in comps:
chan_id = zchan[:-1] + comp
_st = st.select(id=chan_id)
if len(_st) == 0:
continue
st_select.append(_st[0])
return st_select
def measure_on_station_stream(src, st, chan_win):
measure = {}
for tr in st:
_m = measure_on_trace(tr, chan_win, src)
measure.update(_m)
print("Number of measurements in trace: %d " % len(_m))
# add common information
measure["distance"] = chan_win["distance"]
measure["channel"] = st[0].id
measure["source"] = "%s" % UTCDateTime(src.time)
measure["magnitude"] = src.mag
return measure
def measure_on_stream(src, waveform_file, window_file):
try:
st = read_waveform(waveform_file)
except Exception as err:
print("Error reading waveform(%s): %s" % (waveform_file, err))
return
try:
windows = load_json(window_file)
except Exception as err:
print("Error reading window file: %s" % err)
return
results = []
missing_stations = 0
for zchan, chan_win in windows.iteritems():
st_comp = select_station_components(st, zchan)
        if len(st_comp) != 3:
            missing_stations += 1
            continue
        _nw = st_comp[0].stats.network
        _sta = st_comp[0].stats.station
        print("-" * 10 + " station: %s.%s " % (_nw, _sta) + "-" * 10)
try:
measure = measure_on_station_stream(src, st_comp, chan_win)
results.append(measure)
print("Number of measurements in station stream: %d"
% len(measure))
except Exception as err:
print("Failed to process data due to: %s" % err)
return {"measure": results, "missing_stations": missing_stations}
def save_measurements(results, fn):
data = {}
for k in results[0]:
data[k] = []
for d in results:
for k, v in d.iteritems():
data[k].append(v)
df = pd.DataFrame(data)
print("Save features to file: %s" % fn)
df.to_csv(fn)
def main():
sources = pd.read_csv("../data/source.csv")
sources.sort_values("time", ascending=False, inplace=True)
# stations = pd.read_csv("../data/station.csv")
waveform_base = "../../proc"
window_base = "../../arrivals"
nsources = len(sources)
results = []
missing_stations = 0
for idx in range(nsources):
src = sources.loc[idx]
# if src.mag < 3.2:
# continue
origin_time = obspy.UTCDateTime(src.time)
print("=" * 10 + " [%d/%d]Source(%s, mag=%.2f, dep=%.2f km) "
% (idx + 1, nsources, origin_time, src.mag, src.depth) +
"=" * 10)
waveform_file = os.path.join(
waveform_base, "%s" % origin_time, "CI.mseed")
window_file = os.path.join(window_base, "%s.json" % origin_time)
_m = measure_on_stream(src, waveform_file, window_file)
if _m is not None:
results.extend(_m["measure"])
missing_stations += _m["missing_stations"]
print(" *** Missing stations in total: %d ***" % missing_stations)
save_measurements(results, "../../measurements.csv")
if __name__ == "__main__":
main()
| gpl-3.0 |
mehdidc/scikit-learn | sklearn/neighbors/approximate.py | 3 | 21294 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
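        # Each group of 8 sign bits is packed into one byte; viewing the bytes
        # as HASH_DTYPE ('>u4') then yields one 32-bit integer hash per block
        # of 32 projections (descriptive note).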
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
    n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
distances = pairwise_distances(query, self._fit_X[candidates],
metric='cosine')[0]
distance_positions = np.argsort(distances)
return distance_positions, distances[distance_positions]
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # n_components = hash size and n_features = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
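            # With n_components == MAX_HASH_SIZE (32 bits) each sample maps to
            # a single uint32, so only the first column is kept (descriptive
            # note).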
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""
Returns the n_number of approximated nearest neighbors
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""
Returns the approximated nearest neighbors within the radius
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximated nearest points
with in the `radius` to the query in the population matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
dr-rodriguez/The-Divided-States-of-America | analysis.py | 1 | 12719 | from nltk.corpus import stopwords
from nltk.tokenize import wordpunct_tokenize
from nltk.stem.porter import PorterStemmer
from collections import Counter
import string
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.cross_validation import train_test_split
from sklearn import svm, grid_search
import matplotlib.pyplot as plt
from math import sqrt
import seaborn as sns
# Set style for seaborn
sns.set_style('whitegrid')
# TODO: Look into adding some form of sentiment analysis
class Analyzer:
"""
Class for carrying out the analysis and model creation/loading
"""
def __init__(self, data, labels=None, max_words=150, load_pca=False, load_svm=False, more_stop_words=[''],
use_sentiment=True):
self.data = data # Data matrix
self.labels = labels # Label array
# Text Mining
self.max_words = max_words
self.dtm = []
self.top_words = dict()
self.words = Counter()
self.more_stop_words = more_stop_words
# Principal Component Analysis
self.load_pca = load_pca # Load or compute the PCA?
self.pca = None
self.pcscores = None
self.loadings = None
self.load_squared = None
# Sentiment analysis
self.sentiment = None
self.use_sentiment = use_sentiment
# Support Vector Machine Classifier
self.load_svm = load_svm
self.svc = None
# Use stemming
self.porter = PorterStemmer()
# Set stop words
self.stop_words = set(stopwords.words('english'))
self.stop_words.update([s for s in string.punctuation] +
[u'\u2014', u'\u2019', u'\u201c', u'\xf3', u'\u201d', u'\u2014@', u'://', u'!"', u'"@',
u'."', u'.@', u'co', u'\u2026', u'&', u'&', u'amp', u'...', u'.\u201d', u'000',
u'\xed'])
# Political terms and Twitter handles to remove
self.stop_words.update(['hillary', 'clinton', 'donald', 'trump', 'clinton2016',
'trump2016', 'hillary2016', 'makeamericagreatagain'])
self.stop_words.update(['realdonaldtrump', 'hillaryclinton', 'berniesanders'])
self.stop_words.update(self.more_stop_words)
def create_full_model(self):
print('Getting top {} words...'.format(self.max_words))
self.get_words()
print('Creating document term matrix...')
self.create_dtm()
print('Running Principal Component Analysis...')
self.run_pca()
if self.use_sentiment:
print('Running Sentiment Analysis...')
self.get_sentiment()
print('Running Support Vector Machine Classifier...')
return self.run_svm()
def load_full_model(self):
self.load_words()
self.create_dtm()
self.run_pca()
if self.use_sentiment: self.get_sentiment()
return self.run_svm()
def get_words(self):
str_list = ' '.join([tweet for tweet in self.data])
self.words = Counter([self.porter.stem(i.lower()) for i in wordpunct_tokenize(str_list)
if i.lower() not in self.stop_words and not i.lower().startswith('http')])
self.top_words = dict(self.words.most_common(self.max_words))
def save_words(self, filename='words.pkl'):
joblib.dump(self.top_words, 'model/'+filename)
def load_words(self, filename='words.pkl'):
print('Loading model/{}'.format(filename))
self.top_words = joblib.load('model/'+filename)
def create_dtm(self):
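        """Build the document-term matrix: one dict per tweet counting only the selected top words."""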
dtm = []
for tweet in self.data:
# Make empty row
newrow = dict()
for term in self.top_words.keys():
newrow[term] = 0
tweetwords = [self.porter.stem(i.lower()) for i in wordpunct_tokenize(tweet)
if i.lower() not in self.stop_words and not i.lower().startswith('http')]
for word in tweetwords:
if word in self.top_words.keys():
newrow[word] += 1
dtm.append(newrow)
self.dtm = dtm
def get_sentiment(self):
# Load up the NRC emotion lexicon
filename = 'data/NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt'
data = pd.read_csv(filename, delim_whitespace=True, skiprows=45, header=None, names=['word', 'affect', 'flag'])
emotion_words = dict()
emotion_map = dict()
affects = ['positive', 'negative', 'anger', 'anticipation', 'disgust',
'fear', 'joy', 'sadness', 'surprise', 'trust']
for key in affects:
emotion_words[key] = data[(data['affect'] == key) & (data['flag'] == 1)]['word'].tolist()
emotion_map[key] = list()
for text in self.data: # Note no stemming or it may fail to match words
words = Counter([i.lower() for i in wordpunct_tokenize(text)
if i.lower() not in self.stop_words and not i.lower().startswith('http')])
for key in emotion_words.keys():
x = set(emotion_words[key]).intersection(words.keys())
emotion_map[key].append(len(x))
self.sentiment = pd.DataFrame(emotion_map)
def run_pca(self, filename='pca.pkl'):
df_dtm = pd.DataFrame(self.dtm, columns=self.top_words.keys())
# Load or run the PCA
if self.load_pca:
print('Loading model/{}'.format(filename))
pca = joblib.load('model/'+filename)
else:
pca = PCA(n_components=0.8)
pca.fit(df_dtm)
pcscores = pd.DataFrame(pca.transform(df_dtm))
pcscores.columns = ['PC' + str(i + 1) for i in range(pcscores.shape[1])]
loadings = pd.DataFrame(pca.components_, columns=self.top_words.keys())
load_squared = loadings.transpose() ** 2
load_squared.columns = ['PC' + str(i + 1) for i in range(pcscores.shape[1])]
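        # Squared loadings: each column sums to 1, giving every word's share
        # of the variance captured by that component (descriptive note).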
self.pcscores = pcscores
self.loadings = loadings
self.load_squared = load_squared
# Prep for save, just in case
self.pca = pca
def save_pca(self, filename='pca.pkl'):
joblib.dump(self.pca, 'model/' + filename)
def run_svm(self, filename='svm.pkl'):
if not self.load_svm:
if self.use_sentiment:
self.pcscores.index = range(len(self.pcscores))
data = pd.concat([self.pcscores, self.sentiment], axis=1)
else:
data = self.pcscores
df_train, df_test, train_label, test_label = train_test_split(data, self.labels,
test_size=0.2, random_state=42)
parameters = {'kernel': ['rbf'], 'C': [0.01, 0.1, 1, 10, 100], 'gamma': [1e-3, 1e-2, 1e-1, 1e0, 1e1]}
svr = svm.SVC()
clf = grid_search.GridSearchCV(svr, parameters, cv=5, error_score=0, verbose=1)
clf.fit(df_train, train_label)
print('Best parameters: {}'.format(clf.best_params_))
prediction = clf.predict(df_test)
self.svc = clf
return prediction, test_label
else:
print('Loading model/{}'.format(filename))
clf = joblib.load('model/'+filename)
if self.use_sentiment:
self.pcscores.index = range(len(self.pcscores))
data = pd.concat([self.pcscores, self.sentiment], axis=1)
else:
data = self.pcscores
prediction = clf.predict(data)
self.svc = clf
return prediction
def save_svm(self, filename='svm.pkl'):
joblib.dump(self.svc, 'model/' + filename)
def make_confusion_matrix(self, test_label, test_predict, normalize=False, axis=0, label_names=['Hillary', 'Trump']):
cm = confusion_matrix(test_label, test_predict)
if normalize:
cm = cm.astype('float') / cm.sum(axis=axis)[:, np.newaxis]
pretty_cm(cm)
print(classification_report(test_label, test_predict, target_names=label_names))
return cm
def make_biplot(self, xval=0, yval=1, max_arrow=0.2, save=False, alpha=0.4, use_sns=False):
"""
Create a biplot of the PCA components
:param xval: PCA component for the x-axis
:param yval: PCA component for the y-axis
:param max_arrow: Scaling to control how many arrows are plotted
:param save: Filename or False if no save needed
:param alpha: Transparency
:return:
"""
# Check if pca has been run
if self.pcscores is None:
print('Run PCA first')
return
n = self.loadings.shape[1]
scalex = 1.0 / (self.pcscores.iloc[:, xval].max() - self.pcscores.iloc[:, xval].min()) # Rescaling to be from -1 to +1
scaley = 1.0 / (self.pcscores.iloc[:, yval].max() - self.pcscores.iloc[:, yval].min())
if use_sns:
# Use seaborn
cut = 120
g = sns.JointGrid(x=self.pcscores.iloc[:, xval][self.labels == 0] * scalex,
y=self.pcscores.iloc[:, yval][self.labels == 0] * scaley)
g.plot_joint(plt.scatter, c='blue', label='Hillary Clinton', alpha=alpha)
g.plot_marginals(sns.kdeplot, shade=True, color='blue', cut=cut)
# g.plot_marginals(sns.distplot, color='blue')
g.x = self.pcscores.iloc[:, xval][self.labels == 1] * scalex
g.y = self.pcscores.iloc[:, yval][self.labels == 1] * scaley
g.plot_joint(plt.scatter, c='red', label='Donald Trump', alpha=alpha)
plt.legend(loc='best')
g.plot_marginals(sns.kdeplot, shade=True, color='red', legend=False, cut=cut)
# g.plot_marginals(sns.distplot, color='red')
g.x, g.y = [], []
g.plot_joint(plt.scatter)
# Draw arrows
for i in range(n):
# Only plot the longer ones
length = sqrt(self.loadings.iloc[xval, i] ** 2 + self.loadings.iloc[yval, i] ** 2)
if length < max_arrow:
continue
plt.arrow(0, 0, self.loadings.iloc[xval, i], self.loadings.iloc[yval, i], color='g', alpha=0.9)
plt.text(self.loadings.iloc[xval, i] * 1.15, self.loadings.iloc[yval, i] * 1.15,
self.loadings.columns.tolist()[i], color='k', ha='center', va='center')
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel('PC{}'.format(xval + 1))
plt.ylabel('PC{}'.format(yval + 1))
if save: g.savefig(save)
plt.show()
else:
plt.figure()
if self.labels is not None:
plt.plot(self.pcscores.iloc[:, xval][self.labels == 0] * scalex, self.pcscores.iloc[:, yval][self.labels == 0] * scaley,
'bo', alpha=alpha, label='Hillary Clinton')
plt.plot(self.pcscores.iloc[:, xval][self.labels == 1] * scalex, self.pcscores.iloc[:, yval][self.labels == 1] * scaley,
'ro', alpha=alpha, label='Donald Trump')
else:
plt.plot(self.pcscores.iloc[:, xval] * scalex, self.pcscores.iloc[:, yval] * scaley,
'bo', alpha=alpha)
for i in range(n):
# Only plot the longer ones
length = sqrt(self.loadings.iloc[xval, i]**2 + self.loadings.iloc[yval, i]**2)
if length < max_arrow:
continue
plt.arrow(0, 0, self.loadings.iloc[xval, i], self.loadings.iloc[yval, i], color='g', alpha=0.9)
plt.text(self.loadings.iloc[xval, i] * 1.15, self.loadings.iloc[yval, i] * 1.15,
self.loadings.columns.tolist()[i], color='k', ha='center', va='center')
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.xlabel('PC{}'.format(xval+1))
plt.ylabel('PC{}'.format(yval+1))
if self.labels is not None: plt.legend(loc='best', numpoints=1)
plt.grid()
if save: plt.savefig(save)
plt.show()
def pretty_cm(cm, label_names=['Hillary', 'Trump'], show_sum=False):
table = pd.DataFrame(cm, columns=['P-' + s for s in label_names], index=['T-' + s for s in label_names])
print(table)
if show_sum:
print('Sum of columns: {}'.format(cm.sum(axis=0)))
print('Sum of rows: {}'.format(cm.sum(axis=1)))
print('')
| mit |
lucidfrontier45/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 3 | 5823 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
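                # Reference note (not in the original test): these step sizes
                # correspond to the PA-I (clipped) and PA-II (regularized)
                # updates of Crammer et al. (2006).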
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in xrange(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in xrange(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
huzq/scikit-learn | maint_tools/sort_whats_new.py | 28 | 1251 | #!/usr/bin/env python
# Sorts what's new entries with per-module headings.
# Pass what's new entries on stdin.
import sys
import re
from collections import defaultdict
LABEL_ORDER = ['MajorFeature', 'Feature', 'Enhancement', 'Efficiency',
'Fix', 'API']
def entry_sort_key(s):
if s.startswith('- |'):
return LABEL_ORDER.index(s.split('|')[1])
else:
return -1
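# Illustrative (hypothetical) entry: "- |Fix| Fixed a corner case in
# :func:`utils.check_array`." gets sort key LABEL_ORDER.index('Fix') == 4,
# while plain entries sort first with key -1.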
# discard headings and other non-entry lines
text = ''.join(l for l in sys.stdin
if l.startswith('- ') or l.startswith(' '))
bucketed = defaultdict(list)
for entry in re.split('\n(?=- )', text.strip()):
modules = re.findall(r':(?:func|meth|mod|class):'
r'`(?:[^<`]*<|~)?(?:sklearn.)?([a-z]\w+)',
entry)
modules = set(modules)
if len(modules) > 1:
key = 'Multiple modules'
elif modules:
key = ':mod:`sklearn.%s`' % next(iter(modules))
else:
key = 'Miscellaneous'
bucketed[key].append(entry)
entry = entry.strip() + '\n'
everything = []
for key, bucket in sorted(bucketed.items()):
everything.append(key + '\n' + '.' * len(key))
bucket.sort(key=entry_sort_key)
everything.extend(bucket)
print('\n\n'.join(everything))
| bsd-3-clause |
kernc/scikit-learn | examples/mixture/plot_gmm_covariances.py | 13 | 4262 | """
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMM are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
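        # Inflate the eigenvalues so the ellipses are clearly visible at the
        # scale of the iris data; the factor 9 is a plotting choice
        # (descriptive note).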
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((covar_type,
GMM(n_components=n_classes, covariance_type=covar_type,
init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators / 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
ibmsoe/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 85 | 4704 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
and value is an `inflow.Series`, a `pandas.Series` or a numpy array.
Raises:
TypeError: keys are not strings.
TypeError: values are not `inflow.Series`, `pandas.Series` or
`numpy.ndarray`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
| apache-2.0 |
cgre-aachen/gempy | gempy/utils/export.py | 1 | 4339 |
from matplotlib.cm import ScalarMappable as SM
from gempy.plot._visualization_2d import PlotData2D
import numpy as np
import os
def export_geomap2geotiff(path, geo_model, geo_map=None, geotiff_filepath=None):
"""
Args:
path (str): Filepath for the exported geotiff, must end in .tif
        geo_model (gempy model): Geological model providing the colormap, norm and map resolution
        geo_map (np.ndarray): 2-D array containing the geological map
geotiff_filepath (str): Filepath of the template geotiff
Returns:
Saves the geological map as a geotiff to the given path.
"""
from osgeo import gdal
plot = PlotData2D(geo_model)
cmap = plot._cmap
norm = plot._norm
if geo_map is None:
geo_map = geo_model.solutions.geological_map[0].reshape(geo_model._grid.topography.resolution)
if geotiff_filepath is None:
# call the other function
print('stupid')
# **********************************************************************
geo_map_rgb = SM(norm=norm, cmap=cmap).to_rgba(geo_map.T) # r,g,b,alpha
# **********************************************************************
# gdal.UseExceptions()
ds = gdal.Open(geotiff_filepath)
band = ds.GetRasterBand(1)
arr = band.ReadAsArray()
[cols, rows] = arr.shape
outFileName = path
driver = gdal.GetDriverByName("GTiff")
options = ['PROFILE=GeoTiff', 'PHOTOMETRIC=RGB', 'COMPRESS=JPEG']
outdata = driver.Create(outFileName, rows, cols, 3, gdal.GDT_Byte, options=options)
outdata.SetGeoTransform(ds.GetGeoTransform()) # sets same geotransform as input
outdata.SetProjection(ds.GetProjection()) # sets same projection as input
outdata.GetRasterBand(1).WriteArray(geo_map_rgb[:, ::-1, 0].T * 256)
outdata.GetRasterBand(2).WriteArray(geo_map_rgb[:, ::-1, 1].T * 256)
outdata.GetRasterBand(3).WriteArray(geo_map_rgb[:, ::-1, 2].T * 256)
outdata.GetRasterBand(1).SetColorInterpretation(gdal.GCI_RedBand)
outdata.GetRasterBand(2).SetColorInterpretation(gdal.GCI_GreenBand)
outdata.GetRasterBand(3).SetColorInterpretation(gdal.GCI_BlueBand)
# outdata.GetRasterBand(4).SetColorInterpretation(gdal.GCI_AlphaBand) # alpha band
# outdata.GetRasterBand(1).SetNoDataValue(999)##if you want these values transparent
outdata.FlushCache() # saves to disk
outdata = None # closes file (important)
band = None
ds = None
print("Successfully exported geological map to " +path)
def export_moose_input(geo_model, path=None, filename='geo_model_units_moose_input.i'):
"""
Method to export a 3D geological model as MOOSE compatible input.
Args:
geo_model (gempy Model): Computed model providing the regular grid and lith_block
path (str): Filepath for the exported input file
filename (str): Name of the exported input file
Returns:
None. Writes the MOOSE input file to path + filename.
"""
# get model dimensions
nx, ny, nz = geo_model.grid.regular_grid.resolution
xmin, xmax, ymin, ymax, zmin, zmax = geo_model.solutions.grid.regular_grid.extent
# get unit IDs and restructure them
ids = np.round(geo_model.solutions.lith_block)
ids = ids.astype(int)
liths = ids.reshape((nx, ny, nz))
liths = liths.flatten('F')
# create unit ID string for the fstring
idstring = '\n '.join(map(str, liths))
# create a dictionary with unit names and corresponding unit IDs
sids = dict(zip(geo_model._surfaces.df['surface'], geo_model._surfaces.df['id']))
surfs = list(sids.keys())
uids = list(sids.values())
# create strings for fstring, so in MOOSE, units have a name instead of an ID
surfs_string = ' '.join(surfs)
ids_string = ' '.join(map(str, uids))
fstring = f"""[MeshGenerators]
[./gmg]
type = GeneratedMeshGenerator
dim = 3
nx = {nx}
ny = {ny}
nz = {nz}
xmin = {xmin}
xmax = {xmax}
ymin = {ymin}
ymax = {ymax}
zmin = {zmin}
zmax = {zmax}
block_id = '{ids_string}'
block_name = '{surfs_string}'
[../]
[./subdomains]
type = ElementSubdomainIDGenerator
input = gmg
subdomain_ids = '{idstring}'
[../]
[]
[Mesh]
type = MeshGeneratorMesh
[]
"""
if not path:
path = './'
if not os.path.exists(path):
os.makedirs(path)
f = open(path+filename, 'w+')
f.write(fstring)
f.close()
print("Successfully exported geological model as moose input to "+path) | lgpl-3.0 |
lizardsystem/controlnext | controlnext/management/notebook_extension.py | 1 | 3200 | def load_ipython_extension(ipython):
import sys
sys.path[0:0] = [
'/home/jsmits/Development/repos/delfland',
'/home/jsmits/.buildout/eggs/djangorecipe-1.3-py2.7.egg',
'/home/jsmits/.buildout/eggs/Django-1.4.3-py2.7.egg',
'/home/jsmits/.buildout/eggs/zc.recipe.egg-1.2.2-py2.7.egg',
'/home/jsmits/.buildout/eggs/zc.buildout-1.4.4-py2.7.egg',
'/usr/lib/python2.7/dist-packages',
'/home/jsmits/.buildout/eggs/simplejson-2.4.0-py2.7-linux-x86_64.egg',
'/home/jsmits/.buildout/eggs/Werkzeug-0.8.3-py2.7.egg',
'/home/jsmits/.buildout/eggs/python_memcached-1.48-py2.7.egg',
'/home/jsmits/.buildout/eggs/lizard_ui-4.16-py2.7.egg',
'/home/jsmits/.buildout/eggs/lizard_map-4.16-py2.7.egg',
'/home/jsmits/Development/repos/delfland/src/controlnext',
'/home/jsmits/.buildout/eggs/gunicorn-0.13.4-py2.7.egg',
'/home/jsmits/.buildout/eggs/django_nose-1.1-py2.7.egg',
'/home/jsmits/.buildout/eggs/django_extensions-1.0.1-py2.7.egg',
'/home/jsmits/.buildout/eggs/django_celery-3.0.11-py2.7.egg',
'/home/jsmits/.buildout/eggs/South-0.7.6-py2.7.egg',
'/home/jsmits/.buildout/eggs/raven-2.0.3-py2.7.egg',
'/home/jsmits/.buildout/eggs/lizard_security-0.5-py2.7.egg',
'/home/jsmits/.buildout/eggs/docutils-0.8-py2.7.egg',
'/home/jsmits/.buildout/eggs/django_compressor-1.2-py2.7.egg',
'/home/jsmits/.buildout/eggs/django_staticfiles-1.2.1-py2.7.egg',
'/home/jsmits/.buildout/eggs/BeautifulSoup-3.2.1-py2.7.egg',
'/home/jsmits/.buildout/eggs/pkginfo-0.8-py2.7.egg',
'/home/jsmits/.buildout/eggs/mock-0.8.0-py2.7.egg',
'/home/jsmits/.buildout/eggs/lizard_help-0.4-py2.7.egg',
'/home/jsmits/.buildout/eggs/iso8601-0.1.4-py2.7.egg',
'/home/jsmits/.buildout/eggs/djangorestframework-2.1.12-py2.7.egg',
'/home/jsmits/.buildout/eggs/django_piston-0.2.2-py2.7.egg',
'/home/jsmits/.buildout/eggs/django_jsonfield-0.8.11-py2.7.egg',
'/home/jsmits/.buildout/eggs/Pillow-1.7.7-py2.7-linux-x86_64.egg',
'/home/jsmits/.buildout/eggs/factory_boy-1.2.0-py2.7.egg',
'/home/jsmits/.buildout/eggs/pandas-0.10.1-py2.7-linux-x86_64.egg',
'/home/jsmits/.buildout/eggs/lizard_fewsjdbc-2.14-py2.7.egg',
'/home/jsmits/.buildout/eggs/celery-3.0.11-py2.7.egg',
'/home/jsmits/.buildout/eggs/django_tls-0.0.2-py2.7.egg',
'/home/jsmits/.buildout/eggs/django_appconf-0.5-py2.7.egg',
'/home/jsmits/.buildout/eggs/lizard_task-0.15-py2.7.egg',
'/home/jsmits/.buildout/eggs/kombu-2.4.7-py2.7.egg',
'/home/jsmits/.buildout/eggs/billiard-2.7.3.18-py2.7-linux-x86_64.egg',
'/home/jsmits/.buildout/eggs/amqplib-1.0.2-py2.7.egg',
'/home/jsmits/.buildout/eggs/anyjson-0.3.3-py2.7.egg',
'/home/jsmits/Development/repos/delfland',
]
from django.core.management.color import no_style
from django_extensions.management.shells import import_objects
imported_objects = import_objects(options={'dont_load': []},
style=no_style())
print imported_objects
ipython.push(imported_objects)
| gpl-3.0 |
yunque/sms-tools | lectures/06-Harmonic-model/plots-code/f0-TWM-errors-1.py | 22 | 3586 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackman
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
def TWM (pfreq, pmag, maxnpeaks, f0c):
# Two-way mismatch algorithm for f0 detection (by Beauchamp&Maher)
# pfreq, pmag: peak frequencies in Hz and magnitudes, maxnpeaks: maximum number of peaks used
# f0cand: frequencies of f0 candidates
# returns f0: fundamental frequency detected
p = 0.5 # weighting by frequency value
q = 1.4 # weighting related to magnitude of peaks
r = 0.5 # scaling related to magnitude of peaks
rho = 0.33 # weighting of MP error
Amax = max(pmag) # maximum peak magnitude
harmonic = np.matrix(f0c)
ErrorPM = np.zeros(harmonic.size) # initialize PM errors
MaxNPM = min(maxnpeaks, pfreq.size)
for i in range(0, MaxNPM) : # predicted to measured mismatch error
difmatrixPM = harmonic.T * np.ones(pfreq.size)
difmatrixPM = abs(difmatrixPM - np.ones((harmonic.size, 1))*pfreq)
FreqDistance = np.amin(difmatrixPM, axis=1) # minimum along rows
peakloc = np.argmin(difmatrixPM, axis=1)
Ponddif = np.array(FreqDistance) * (np.array(harmonic.T)**(-p))
PeakMag = pmag[peakloc]
MagFactor = 10**((PeakMag-Amax)/20)
ErrorPM = ErrorPM + (Ponddif + MagFactor*(q*Ponddif-r)).T
harmonic = harmonic+f0c
ErrorMP = np.zeros(harmonic.size) # initialize MP errors
MaxNMP = min(10, pfreq.size)
for i in range(0, f0c.size) : # measured to predicted mismatch error
nharm = np.round(pfreq[:MaxNMP]/f0c[i])
nharm = (nharm>=1)*nharm + (nharm<1)
FreqDistance = abs(pfreq[:MaxNMP] - nharm*f0c[i])
Ponddif = FreqDistance * (pfreq[:MaxNMP]**(-p))
PeakMag = pmag[:MaxNMP]
MagFactor = 10**((PeakMag-Amax)/20)
ErrorMP[i] = sum(MagFactor * (Ponddif + MagFactor*(q*Ponddif-r)))
Error = (ErrorPM[0]/MaxNPM) + (rho*ErrorMP/MaxNMP) # total error
f0index = np.argmin(Error) # get the smallest error
f0 = f0c[f0index] # f0 with the smallest error
return f0, ErrorPM, ErrorMP, Error
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 1024
hN = N/2
M = 801
t = -40
start = .8*fs
minf0 = 100
maxf0 = 1500
w = blackman (M)
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs * iploc/N
f0cand = np.arange(minf0, maxf0, 1.0)
maxnpeaks = 10
f0, ErrorPM, ErrorMP, Error = TWM (ipfreq, ipmag, maxnpeaks, f0cand)
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9, 7))
plt.subplot (2,1,1)
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([100,5100,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(f0cand,ErrorPM[0], 'b', label = 'ErrorPM', lw=1.2)
plt.plot(f0cand,ErrorMP, 'g', label = 'ErrorMP', lw=1.2)
plt.plot(f0cand,Error, color='black', label = 'Error Total', lw=1.5)
plt.axis([minf0,maxf0,min(Error),130])
plt.legend()
plt.title('TWM Errors')
plt.tight_layout()
plt.savefig('f0-TWM-errors-1.png')
plt.show()
| agpl-3.0 |
vorwerkc/pymatgen | pymatgen/apps/battery/plotter.py | 5 | 7178 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides plotting capabilities for battery related applications.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jul 12, 2012"
from collections import OrderedDict
import plotly.graph_objects as go
from pymatgen.util.plotting import pretty_plot
class VoltageProfilePlotter:
"""
A plotter to make voltage profile plots for batteries.
"""
def __init__(self, xaxis="capacity", hide_negative=False):
"""
Args:
xaxis: The quantity to use as the xaxis. Can be one of
- capacity_grav: the gravimetric capacity
- capacity_vol: the volumetric capacity
- x_form: the number of working ions per formula unit of the host
- frac_x: the atomic fraction of the working ion
hide_negative: If True only plot the voltage steps above zero
"""
self._electrodes = OrderedDict()
self.xaxis = xaxis
self.hide_negative = hide_negative
def add_electrode(self, electrode, label=None):
"""
Add an electrode to the plot.
Args:
electrode: An electrode. All electrodes satisfying the
AbstractElectrode interface should work.
label: A label for the electrode. If None, defaults to a counting
system, i.e. 'Electrode 1', 'Electrode 2', ...
"""
if not label:
label = "Electrode {}".format(len(self._electrodes) + 1)
self._electrodes[label] = electrode
def get_plot_data(self, electrode, term_zero=True):
"""
Args:
electrode: Electrode object
term_zero: If True append zero voltage point at the end
Returns:
Plot data in x, y.
"""
x = []
y = []
cap = 0
for sub_electrode in electrode.get_sub_electrodes(adjacent_only=True):
if self.hide_negative and sub_electrode.get_average_voltage() < 0:
continue
if self.xaxis in {"capacity_grav", "capacity"}:
x.append(cap)
cap += sub_electrode.get_capacity_grav()
x.append(cap)
elif self.xaxis == "capacity_vol":
x.append(cap)
cap += sub_electrode.get_capacity_vol()
x.append(cap)
elif self.xaxis == "x_form":
x.append(sub_electrode.x_charge)
x.append(sub_electrode.x_discharge)
elif self.xaxis == "frac_x":
x.append(sub_electrode.voltage_pairs[0].frac_charge)
x.append(sub_electrode.voltage_pairs[0].frac_discharge)
else:
raise NotImplementedError("x_axis must be capacity_grav/capacity_vol/x_form/frac_x")
y.extend([sub_electrode.get_average_voltage()] * 2)
if term_zero:
x.append(x[-1])
y.append(0)
return x, y
def get_plot(self, width=8, height=8, term_zero=True):
"""
Returns a plot object.
Args:
width: Width of the plot. Defaults to 8 in.
height: Height of the plot. Defaults to 8 in.
term_zero: If True append zero voltage point at the end
Returns:
A matplotlib plot object.
"""
plt = pretty_plot(width, height)
wion_symbol = set()
formula = set()
for label, electrode in self._electrodes.items():
(x, y) = self.get_plot_data(electrode, term_zero=term_zero)
wion_symbol.add(electrode.working_ion.symbol)
formula.add(electrode._framework_formula)
plt.plot(x, y, "-", linewidth=2, label=label)
plt.legend()
plt.xlabel(self._choose_best_x_label(formula=formula, wion_symbol=wion_symbol))
plt.ylabel("Voltage (V)")
plt.tight_layout()
return plt
def get_plotly_figure(
self,
width=800,
height=600,
font_dict=None,
term_zero=True,
**kwargs,
):
"""
Return plotly Figure object
Args:
width: Width of the plot. Defaults to 800 px.
height: Height of the plot. Defaults to 600 px.
font_dict: Dictionary that defines the font (family, size, color)
term_zero: If True append zero voltage point at the end
**kwargs: Additional keyword arguments passed to the plotly Layout
Returns:
A plotly.graph_objects.Figure
"""
font_dict = dict(family="Arial", size=24, color="#000000") if font_dict is None else font_dict
hover_temp = "Voltage : %{y:.2f} V"
data = []
wion_symbol = set()
formula = set()
for label, electrode in self._electrodes.items():
(x, y) = self.get_plot_data(electrode, term_zero=term_zero)
wion_symbol.add(electrode.working_ion.symbol)
formula.add(electrode._framework_formula)
data.append(go.Scatter(x=x, y=y, name=label, hovertemplate=hover_temp))
fig = go.Figure(
data=data,
layout=go.Layout(
title="Voltage vs. Capacity",
width=width,
height=height,
font=font_dict,
xaxis=dict(title=self._choose_best_x_label(formula=formula, wion_symbol=wion_symbol)),
yaxis=dict(title="Voltage (V)"),
**kwargs,
),
)
fig.update_layout(template="plotly_white", title_x=0.5)
return fig
def _choose_best_x_label(self, formula, wion_symbol):
if self.xaxis in {"capacity", "capacity_grav"}:
return "Capacity (mAh/g)"
if self.xaxis == "capacity_vol":
return "Capacity (Ah/l)"
if len(formula) == 1:
formula = formula.pop()
else:
formula = None
if len(wion_symbol) == 1:
wion_symbol = wion_symbol.pop()
else:
wion_symbol = None
if self.xaxis == "x_form":
if formula and wion_symbol:
return f"x in {wion_symbol}<sub>x</sub>{formula}"
return "x Workion Ion per Host F.U."
if self.xaxis == "frac_x":
if wion_symbol:
return f"Atomic Fraction of {wion_symbol}"
return "Atomic Fraction of Working Ion"
raise RuntimeError("No xaxis label can be determined")
def show(self, width=8, height=6):
"""
Show the voltage profile plot.
Args:
width: Width of the plot. Defaults to 8 in.
height: Height of the plot. Defaults to 6 in.
"""
self.get_plot(width, height).show()
def save(self, filename, image_format="eps", width=8, height=6):
"""
Save the plot to an image file.
Args:
filename: Filename to save to.
image_format: Format to save to. Defaults to eps.
"""
self.get_plot(width, height).savefig(filename, format=image_format)
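# --- Hedged usage sketch (not part of the original module) ---
# Assuming `electrode_a` and `electrode_b` satisfy the AbstractElectrode interface
# (e.g. built with pymatgen's battery analysis tools):
#
# plotter = VoltageProfilePlotter(xaxis="capacity_grav")
# plotter.add_electrode(electrode_a, label="Electrode A")
# plotter.add_electrode(electrode_b) # auto-labelled "Electrode 2"
# plotter.get_plot().show() # matplotlib backend
# plotter.get_plotly_figure().show() # plotly backend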
| mit |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/IPython/testing/decorators.py | 3 | 11999 | # -*- coding: utf-8 -*-
"""Decorators for labeling test objects.
Decorators that merely return a modified version of the original function
object are straightforward. Decorators that return a new function object need
to use nose.tools.make_decorator(original_function)(decorator) in returning the
decorator, in order to preserve metadata such as function name, setup and
teardown functions and so on - see nose.tools for more information.
This module provides a set of useful decorators meant to be ready to use in
your own tests. See the bottom of the file for the ready-made ones, and if you
find yourself writing a new one that may be of generic use, add it here.
Included decorators:
Lightweight testing that remains unittest-compatible.
- An @as_unittest decorator can be used to tag any normal parameter-less
function as a unittest TestCase. Then, both nose and normal unittest will
recognize it as such. This will make it easier to migrate away from Nose if
we ever need/want to while maintaining very lightweight tests.
NOTE: This file contains IPython-specific decorators. Using the machinery in
IPython.external.decorators, we import either numpy.testing.decorators if numpy is
available, OR use equivalent code in IPython.external._decorators, which
we've copied verbatim from numpy.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import sys
import os
import tempfile
import unittest
from decorator import decorator
# Expose the unittest-driven decorators
from .ipunittest import ipdoctest, ipdocstring
# Grab the numpy-specific decorators which we keep in a file that we
# occasionally update from upstream: decorators.py is a copy of
# numpy.testing.decorators, we expose all of it here.
from IPython.external.decorators import *
# For onlyif_cmd_exists decorator
from IPython.utils.py3compat import string_types, which
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# Simple example of the basic idea
def as_unittest(func):
"""Decorator to make a simple function into a normal test via unittest."""
class Tester(unittest.TestCase):
def test(self):
func()
Tester.__name__ = func.__name__
return Tester
# Utility functions
def apply_wrapper(wrapper,func):
"""Apply a wrapper to a function for decoration.
This mixes Michele Simionato's decorator tool with nose's make_decorator,
to apply a wrapper in a decorator so that all nose attributes, as well as
function signature and other properties, survive the decoration cleanly.
This will ensure that wrapped functions can still be well introspected via
IPython, for example.
"""
import nose.tools
return decorator(wrapper,nose.tools.make_decorator(func)(wrapper))
def make_label_dec(label,ds=None):
"""Factory function to create a decorator that applies one or more labels.
Parameters
----------
label : string or sequence
One or more labels that will be applied by the decorator to the functions
it decorates. Labels are attributes of the decorated function with their
value set to True.
ds : string
An optional docstring for the resulting decorator. If not given, a
default docstring is auto-generated.
Returns
-------
A decorator.
Examples
--------
A simple labeling decorator:
>>> slow = make_label_dec('slow')
>>> slow.__doc__
"Labels a test as 'slow'."
And one that uses multiple labels and a custom docstring:
>>> rare = make_label_dec(['slow','hard'],
... "Mix labels 'slow' and 'hard' for rare tests.")
>>> rare.__doc__
"Mix labels 'slow' and 'hard' for rare tests."
Now, let's test using this one:
>>> @rare
... def f(): pass
...
>>>
>>> f.slow
True
>>> f.hard
True
"""
if isinstance(label, string_types):
labels = [label]
else:
labels = label
# Validate that the given label(s) are OK for use in setattr() by doing a
# dry run on a dummy function.
tmp = lambda : None
for label in labels:
setattr(tmp,label,True)
# This is the actual decorator we'll return
def decor(f):
for label in labels:
setattr(f,label,True)
return f
# Apply the user's docstring, or autogenerate a basic one
if ds is None:
ds = "Labels a test as %r." % label
decor.__doc__ = ds
return decor
# Inspired by numpy's skipif, but uses the full apply_wrapper utility to
# preserve function metadata better and allows the skip condition to be a
# callable.
def skipif(skip_condition, msg=None):
''' Make function raise SkipTest exception if skip_condition is true
Parameters
----------
skip_condition : bool or callable
Flag to determine whether to skip test. If the condition is a
callable, it is used at runtime to dynamically make the decision. This
is useful for tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a SkipTest exception.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
Notes
-----
You will see from the code that we had to further decorate the
decorator with the nose.tools.make_decorator function in order to
transmit function name, and various other metadata.
'''
def skip_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Allow for both boolean or callable skip conditions.
if callable(skip_condition):
skip_val = skip_condition
else:
skip_val = lambda : skip_condition
def get_msg(func,msg=None):
"""Skip message with information about function being skipped."""
if msg is None: out = 'Test skipped due to test condition.'
else: out = msg
return "Skipping test: %s. %s" % (func.__name__,out)
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
raise nose.SkipTest(get_msg(f,msg))
else:
for x in f(*args, **kwargs):
yield x
# Choose the right skipper to use when building the actual generator.
if nose.util.isgenerator(f):
skipper = skipper_gen
else:
skipper = skipper_func
return nose.tools.make_decorator(f)(skipper)
return skip_decorator
# A version with the condition set to true, common case just to attach a message
# to a skip decorator
def skip(msg=None):
"""Decorator factory - mark a test function for skipping from test suite.
Parameters
----------
msg : string
Optional message to be added.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised, with the optional message added.
"""
return skipif(True,msg)
def onlyif(condition, msg):
"""The reverse from skipif, see skipif for details."""
if callable(condition):
skip_condition = lambda : not condition()
else:
skip_condition = lambda : not condition
return skipif(skip_condition, msg)
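# Example usage (hedged sketch, not part of the original module):
#
# @skipif(sys.platform == 'win32', "This test does not run under Windows")
# def test_not_on_windows():
#     pass
#
# @onlyif(os.path.exists('/dev/null'), "This test requires /dev/null")
# def test_needs_devnull():
#     pass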
#-----------------------------------------------------------------------------
# Utility functions for decorators
def module_not_available(module):
"""Can module be imported? Returns true if module does NOT import.
This is used to make a decorator to skip tests that require module to be
available, but delay the 'import numpy' to test execution time.
"""
try:
mod = __import__(module)
mod_not_avail = False
except ImportError:
mod_not_avail = True
return mod_not_avail
def decorated_dummy(dec, name):
"""Return a dummy function decorated with dec, with the given name.
Examples
--------
import IPython.testing.decorators as dec
setup = dec.decorated_dummy(dec.skip_if_no_x11, __name__)
"""
dummy = lambda: None
dummy.__name__ = name
return dec(dummy)
#-----------------------------------------------------------------------------
# Decorators for public use
# Decorators to skip certain tests on specific platforms.
skip_win32 = skipif(sys.platform == 'win32',
"This test does not run under Windows")
skip_linux = skipif(sys.platform.startswith('linux'),
"This test does not run under Linux")
skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
# Decorators to skip tests if not on specific platforms.
skip_if_not_win32 = skipif(sys.platform != 'win32',
"This test only runs under Windows")
skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
"This test only runs under Linux")
skip_if_not_osx = skipif(sys.platform != 'darwin',
"This test only runs under OSX")
_x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
os.environ.get('DISPLAY', '') == '')
_x11_skip_msg = "Skipped under *nix when X11/XOrg not available"
skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)
# not a decorator itself, returns a dummy function to be used as setup
def skip_file_no_x11(name):
return decorated_dummy(skip_if_no_x11, name) if _x11_skip_cond else None
# Other skip decorators
# generic skip without module
skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)
skipif_not_numpy = skip_without('numpy')
skipif_not_matplotlib = skip_without('matplotlib')
skipif_not_sympy = skip_without('sympy')
skip_known_failure = knownfailureif(True,'This test is known to fail')
known_failure_py3 = knownfailureif(sys.version_info[0] >= 3,
'This test is known to fail on Python 3.')
# A null 'decorator', useful to make more readable code that needs to pick
# between different decorators based on OS or other conditions
null_deco = lambda f: f
# Some tests only run where we can use unicode paths. Note that we can't just
# check os.path.supports_unicode_filenames, which is always False on Linux.
try:
f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
except UnicodeEncodeError:
unicode_paths = False
else:
unicode_paths = True
f.close()
onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
"where we can use unicode in filenames."))
def onlyif_cmds_exist(*commands):
"""
Decorator to skip test when at least one of `commands` is not found.
"""
for cmd in commands:
if not which(cmd):
return skip("This test runs only if command '{0}' "
"is installed".format(cmd))
return null_deco
def onlyif_any_cmd_exists(*commands):
"""
Decorator to skip test unless at least one of `commands` is found.
"""
for cmd in commands:
if which(cmd):
return null_deco
return skip("This test runs only if one of the commands {0} "
"is installed".format(commands))
| artistic-2.0 |
mitschabaude/nanopores | scripts/stokesian/events.py | 1 | 3563 | # draggable circles adapted from draggable rectangle demo
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
class DraggableRectangle:
lock = None # only one can be animated at a time
def __init__(self, rect):
self.rect = rect
self.press = None
self.background = None
def connect(self):
'connect to all the events we need'
self.cidpress = self.rect.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.rect.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.rect.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
def on_press(self, event):
'on button press we will see if the mouse is over us and store some data'
if event.inaxes != self.rect.axes: return
if DraggableRectangle.lock is not None: return
contains, attrd = self.rect.contains(event)
if not contains: return
print('event contains', self.rect.center)
x0, y0 = self.rect.center
self.press = x0, y0, event.xdata, event.ydata
DraggableRectangle.lock = self
# draw everything but the selected rectangle and store the pixel buffer
canvas = self.rect.figure.canvas
axes = self.rect.axes
self.rect.set_animated(True)
canvas.draw()
self.background = canvas.copy_from_bbox(self.rect.axes.bbox)
# now redraw just the rectangle
axes.draw_artist(self.rect)
# and blit just the redrawn area
canvas.blit(axes.bbox)
def on_motion(self, event):
'on motion we will move the rect if the mouse is over us'
if DraggableRectangle.lock is not self:
return
if event.inaxes != self.rect.axes: return
x0, y0, xpress, ypress = self.press
dx = event.xdata - xpress
dy = event.ydata - ypress
self.rect.center = x0 + dx, y0 + dy
#self.rect.set_x(x0+dx)
#self.rect.set_y(y0+dy)
canvas = self.rect.figure.canvas
axes = self.rect.axes
# restore the background region
canvas.restore_region(self.background)
# redraw just the current rectangle
axes.draw_artist(self.rect)
# blit just the redrawn area
canvas.blit(axes.bbox)
def on_release(self, event):
'on release we reset the press data'
if DraggableRectangle.lock is not self:
return
self.press = None
DraggableRectangle.lock = None
# turn off the rect animation property and reset the background
self.rect.set_animated(False)
self.background = None
# redraw the full figure
self.rect.figure.canvas.draw()
def disconnect(self):
'disconnect all the stored connection ids'
self.rect.figure.canvas.mpl_disconnect(self.cidpress)
self.rect.figure.canvas.mpl_disconnect(self.cidrelease)
self.rect.figure.canvas.mpl_disconnect(self.cidmotion)
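# Hedged note (not part of the original script): connect/on_press/on_motion/
# on_release implement matplotlib's standard blitting pattern - on press, draw
# everything except the grabbed patch and cache it with copy_from_bbox; on each
# motion event, restore_region + draw_artist + blit redraws only the dragged
# patch, which keeps dragging responsive even with many artists.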
rnd = np.random
N = 10
circles = [Circle(xy=rnd.rand(2)*10, radius=rnd.rand(), angle=rnd.rand()*360)
for i in range(N)]
fig = plt.figure(0)
ax = fig.add_subplot(111, aspect='equal')
for c in circles:
ax.add_artist(c)
c.set_clip_box(ax.bbox)
c.set_alpha(rnd.rand())
c.set_facecolor(rnd.rand(3))
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
dcs = []
for c in circles:
dc = DraggableRectangle(c)
dc.connect()
dcs.append(dc)
plt.show()
| mit |
jeremiedecock/snippets | python/matplotlib/contours/label_on_contours_same_color.py | 1 | 1331 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# See http://matplotlib.org/api/axes_api.html?highlight=contour#matplotlib.axes.Axes.contour
import math
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
def main():
"""The main function."""
# Build data ################
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
#Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
#Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
#Z = 10.0 * (Z2 - Z1) # difference of Gaussians
Z = X * np.exp(-X**2 - Y**2)
# Plot data #################
## Plot contour
#CS = plt.contour(X, Y, Z, colors='k') # "colors='k'" -> force all the contours to be the same color.
#plt.clabel(CS, inline=1, fontsize=10)
fig = plt.figure()
ax = fig.add_subplot(111)
# Plot contour
CS = ax.contour(X, Y, Z, colors='k') # "colors='k'" -> force all the contours to be the same color.
ax.clabel(CS, inline=1, fontsize=10)
plt.show()
if __name__ == '__main__':
main()
| mit |
StratsOn/zipline | tests/test_examples.py | 11 | 1334 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is based on a unittest written by John Salvatier:
# https://github.com/pymc-devs/pymc/blob/pymc3/tests/test_examples.py
# Disable plotting
#
import glob
import imp
import matplotlib
from nose_parameterized import parameterized
import os
from unittest import TestCase
matplotlib.use('Agg')
def example_dir():
import zipline
d = os.path.dirname(zipline.__file__)
return os.path.join(os.path.abspath(d), 'examples')
class ExamplesTests(TestCase):
@parameterized.expand(((os.path.basename(f).replace('.', '_'), f) for f in
glob.glob(os.path.join(example_dir(), '*.py'))))
def test_example(self, name, example):
imp.load_source('__main__', os.path.basename(example), open(example))
| apache-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/animation/old_animation/dynamic_image_wxagg2.py | 10 | 3037 | #!/usr/bin/env python
"""
Copyright (C) 2003-2005 Jeremy O'Donoghue and others
License: This work is licensed under the PSF. A copy should be included
with this source code, and is also available at
http://www.python.org/psf/license.html
"""
import sys, time, os, gc
import matplotlib
matplotlib.use('WXAgg')
from matplotlib import rcParams
import numpy as npy
import matplotlib.cm as cm
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from wx import *
TIMER_ID = NewId()
class PlotFigure(Frame):
def __init__(self):
Frame.__init__(self, None, -1, "Test embedded wxFigure")
self.fig = Figure((5,4), 75)
self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
# On Windows, default frame size behaviour is incorrect
# you don't need this under Linux
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
self.toolbar.SetSize(Size(fw, th))
# Create a figure manager to manage things
# Now put all into a sizer
sizer = BoxSizer(VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(self.canvas, 1, LEFT|TOP|GROW)
# Best to allow the toolbar to resize!
sizer.Add(self.toolbar, 0, GROW)
self.SetSizer(sizer)
self.Fit()
EVT_TIMER(self, TIMER_ID, self.onTimer)
def init_plot_data(self):
# jdh you can add a subplot directly from the fig rather than
# the fig manager
a = self.fig.add_axes([0.075,0.1,0.75,0.85])
cax = self.fig.add_axes([0.85,0.1,0.075,0.85])
self.x = npy.empty((120,120))
self.x.flat = npy.arange(120.0)*2*npy.pi/120.0
self.y = npy.empty((120,120))
self.y.flat = npy.arange(120.0)*2*npy.pi/100.0
self.y = npy.transpose(self.y)
z = npy.sin(self.x) + npy.cos(self.y)
self.im = a.imshow( z, cmap=cm.jet)#, interpolation='nearest')
self.fig.colorbar(self.im,cax=cax,orientation='vertical')
def GetToolBar(self):
# You will need to override GetToolBar if you are using an
# unmanaged toolbar in your frame
return self.toolbar
def onTimer(self, evt):
self.x += npy.pi/15
self.y += npy.pi/20
z = npy.sin(self.x) + npy.cos(self.y)
self.im.set_array(z)
self.canvas.draw()
#self.canvas.gui_repaint() # jdh wxagg_draw calls this already
def onEraseBackground(self, evt):
# this is supposed to prevent redraw flicker on some X servers...
pass
if __name__ == '__main__':
app = PySimpleApp()
frame = PlotFigure()
frame.init_plot_data()
# Initialise the timer - wxPython requires this to be connected to
# the receiving event handler
t = Timer(frame, TIMER_ID)
t.Start(200)
frame.Show()
app.MainLoop()
| mit |
poryfly/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
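# The OVA decision boundary of class c satisfies
# coef[c, 0]*x0 + coef[c, 1]*x1 + intercept[c] = 0; line() solves this for x1.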
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
0asa/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
GPflow/GPflow | doc/source/notebooks/advanced/coregionalisation.pct.py | 1 | 7650 | # ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # A simple demonstration of coregionalization
#
# This notebook shows how to construct a multi-output GP model using GPflow. We will consider a regression problem for functions $f: \mathbb{R}^D \rightarrow \mathbb{R}^P$. We assume that the dataset is of the form $(X_1, f_1), \dots, (X_P, f_P)$, that is, we do not necessarily observe all the outputs for a particular input location (for cases where there are fully observed outputs for each input, see [Multi-output Gaussian processes in GPflow](./multioutput.ipynb) for a more efficient implementation). We allow each $f_i$ to have a different noise distribution by assigning a different likelihood to each.
#
# For this problem, we model $f$ as a *coregionalized* Gaussian process, which assumes a kernel of the form:
#
# \begin{equation}
# \textrm{cov}(f_i(X), f_j(X^\prime)) = k(X, X^\prime) \cdot B[i, j].
# \end{equation}
#
# The covariance of the $i$th function at $X$ and the $j$th function at $X^\prime$ is a kernel applied at $X$ and $X^\prime$, times the $(i, j)$th entry of a positive definite $P \times P$ matrix $B$. This is known as the **intrinsic model of coregionalization (ICM)** (Bonilla and Williams, 2008).
#
# To make sure that B is positive-definite, we parameterize it as:
#
# \begin{equation}
# B = W W^\top + \textrm{diag}(\kappa).
# \end{equation}
#
# To build such a model in GPflow, we need to perform the following steps:
#
# * Create the kernel function defined previously, using the `Coregion` kernel class.
# * Augment the training data X with an extra column that contains an integer index to indicate which output an observation is associated with. This is essential to make the data work with the `Coregion` kernel.
# * Create a likelihood for each output using the `SwitchedLikelihood` class, which is a container for other likelihoods.
# * Augment the training data Y with an extra column that contains an integer index to indicate which likelihood an observation is associated with.
# %%
import gpflow
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from gpflow.ci_utils import ci_niter
plt.rcParams["figure.figsize"] = (12, 6)
np.random.seed(123)
# %% [markdown]
# ## Data preparation
# We start by generating some training data to fit the model with. For this example, we choose the following two correlated functions for our outputs:
#
# \begin{align}
# y_1 &= \sin(6x) + \epsilon_1, \qquad \epsilon_1 \sim \mathcal{N}(0, 0.009) \\
# y_2 &= \sin(6x + 0.7) + \epsilon_2, \qquad \epsilon_2 \sim \mathcal{N}(0, 0.01)
# \end{align}
#
# %%
# make a dataset with two outputs, correlated, heavy-tail noise. One has more noise than the other.
X1 = np.random.rand(100, 1) # Observed locations for first output
X2 = np.random.rand(50, 1) * 0.5 # Observed locations for second output
Y1 = np.sin(6 * X1) + np.random.randn(*X1.shape) * 0.03
Y2 = np.sin(6 * X2 + 0.7) + np.random.randn(*X2.shape) * 0.1
plt.figure(figsize=(8, 4))
plt.plot(X1, Y1, "x", mew=2)
_ = plt.plot(X2, Y2, "x", mew=2)
# %% [markdown]
# ## Data formatting for the coregionalized model
# We add an extra column to our training dataset that contains an index that specifies which output is observed.
# %%
# Augment the input with ones or zeros to indicate the required output dimension
X_augmented = np.vstack((np.hstack((X1, np.zeros_like(X1))), np.hstack((X2, np.ones_like(X2)))))
# Augment the Y data with ones or zeros that specify a likelihood from the list of likelihoods
Y_augmented = np.vstack((np.hstack((Y1, np.zeros_like(Y1))), np.hstack((Y2, np.ones_like(Y2)))))
# %% [markdown]
# ## Building the coregionalization kernel
# We build a coregionalization kernel with the Matern 3/2 kernel as the base kernel. This acts on the leading ([0]) data dimension of the augmented X values. The `Coregion` kernel indexes the outputs, and acts on the last ([1]) data dimension (indices) of the augmented X values. To specify these dimensions, we use the built-in `active_dims` argument in the kernel constructor. To construct the full multi-output kernel, we take the product of the two kernels (for a more in-depth tutorial on kernel combination, see [Manipulating kernels](./kernels.ipynb)).
# %%
output_dim = 2 # Number of outputs
rank = 1 # Rank of W
# Base kernel
k = gpflow.kernels.Matern32(active_dims=[0])
# Coregion kernel
coreg = gpflow.kernels.Coregion(output_dim=output_dim, rank=rank, active_dims=[1])
kern = k * coreg
# %% [markdown]
# **Note:** W = 0 is a saddle point in the objective, which would result in the value of `W` not being optimized to fit the data.
# Hence, by default, the `W` matrix is initialized with 0.1. Alternatively, you could re-initialize the matrix to random entries.
# %% [markdown]
# ## Constructing the model
# The final element in building the model is to specify the likelihood for each output dimension. To do this, use the `SwitchedLikelihood` object in GPflow.
# %%
# This likelihood switches between Gaussian noise with different variances for each f_i:
lik = gpflow.likelihoods.SwitchedLikelihood(
[gpflow.likelihoods.Gaussian(), gpflow.likelihoods.Gaussian()]
)
# now build the GP model as normal
m = gpflow.models.VGP((X_augmented, Y_augmented), kernel=kern, likelihood=lik)
# fit the covariance function parameters
maxiter = ci_niter(10000)
gpflow.optimizers.Scipy().minimize(
m.training_loss,
m.trainable_variables,
options=dict(maxiter=maxiter),
method="L-BFGS-B",
)
# %% [markdown]
# That's it: the model is trained. Let's plot the model fit to see what's happened.
# %%
def plot_gp(x, mu, var, color, label):
plt.plot(x, mu, color=color, lw=2, label=label)
plt.fill_between(
x[:, 0],
(mu - 2 * np.sqrt(var))[:, 0],
(mu + 2 * np.sqrt(var))[:, 0],
color=color,
alpha=0.4,
)
def plot(m):
plt.figure(figsize=(8, 4))
Xtest = np.linspace(0, 1, 100)[:, None]
(line,) = plt.plot(X1, Y1, "x", mew=2)
mu, var = m.predict_f(np.hstack((Xtest, np.zeros_like(Xtest))))
plot_gp(Xtest, mu, var, line.get_color(), "Y1")
(line,) = plt.plot(X2, Y2, "x", mew=2)
mu, var = m.predict_f(np.hstack((Xtest, np.ones_like(Xtest))))
plot_gp(Xtest, mu, var, line.get_color(), "Y2")
plt.legend()
plot(m)
# %% [markdown]
# From the plots, we can see:
#
# - The first function (blue) has low posterior variance everywhere because there are so many observations, and the noise variance is small.
# - The second function (orange) has higher posterior variance near the data, because the data are more noisy, and very high posterior variance where there are no observations (x > 0.5).
# - The model has done a reasonable job of estimating the noise variance and lengthscale.
# - The model recognises the correlation between the two functions and is able to suggest (with uncertainty) that, for x > 0.5, the orange curve should follow the blue curve (which we know to be the truth from the data-generating procedure).
#
# The covariance matrix between outputs is as follows:
# %%
B = coreg.output_covariance().numpy()
print("B =", B)
_ = plt.imshow(B)
# %% [markdown]
# ## References
#
# Bonilla, Edwin V., Kian M. Chai, and Christopher Williams. "Multi-task Gaussian process prediction." _Advances in neural information processing systems_. 2008.
| apache-2.0 |
mtth/hdfs | setup.py | 1 | 1749 | #!/usr/bin/env python
"""HdfsCLI: API and command line interface for HDFS."""
from os import environ
from setuptools import find_packages, setup
import re
def _get_version():
"""Extract version from package."""
with open('hdfs/__init__.py') as reader:
match = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
reader.read(),
re.MULTILINE
)
if match:
return match.group(1)
else:
raise RuntimeError('Unable to extract version.')
def _get_long_description():
"""Get README contents."""
with open('README.md') as reader:
return reader.read()
# Allow configuration of the CLI alias.
ENTRY_POINT = environ.get('HDFSCLI_ENTRY_POINT', 'hdfscli')
setup(
name='hdfs',
version=_get_version(),
description=__doc__,
long_description=_get_long_description(),
long_description_content_type='text/markdown',
author='Matthieu Monsch',
author_email='[email protected]',
url='https://hdfscli.readthedocs.io',
license='MIT',
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'docopt',
'requests>=2.7.0',
'six>=1.9.0',
],
extras_require={
'avro': ['fastavro>=0.21.19'],
'kerberos': ['requests-kerberos>=0.7.0'],
'dataframe': ['fastavro>=0.21.19', 'pandas>=0.14.1'],
},
entry_points={'console_scripts': [
'%s = hdfs.__main__:main' % (ENTRY_POINT, ),
'%s-avro = hdfs.ext.avro.__main__:main' % (ENTRY_POINT, ),
]},
)
| mit |
gtnx/pandas-highcharts | pandas_highcharts/tests.py | 1 | 5332 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
import json
import pandas
from unittest import TestCase
from .core import serialize, json_encode
df = pandas.DataFrame([
{'a': 1, 'b': 2, 'c': 3, 't': datetime.datetime(2015, 1, 1), 's': 's1'},
{'a': 2, 'b': 4, 'c': 6, 't': datetime.datetime(2015, 1, 2), 's': 's2'}
])
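# Hedged usage sketch (not part of the original test module): the API under test
# is typically called as
# chart_dict = serialize(df, render_to="chart", output_type="dict", kind="bar")
# which returns a Highcharts configuration dictionary (or a JSON string,
# depending on output_type).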
class CoreTest(TestCase):
def test_type(self):
self.assertEqual(type(serialize(df, render_to="chart")), str)
obj = serialize(df, render_to="chart", output_type="dict")
self.assertEqual(type(obj), dict)
self.assertTrue('series' in obj)
series = obj['series'][0]
self.assertEqual(series['name'], 'a')
self.assertTrue('data' in series)
self.assertEqual(series['data'], [(0, 1), (1, 2)])
obj = serialize(df, render_to="chart", output_type="dict", zoom="xy")
self.assertTrue("chart" in obj)
self.assertEqual(type(obj["chart"]), dict)
self.assertTrue("zoomType" in obj["chart"])
self.assertRaises(ValueError, serialize, df, **{"render_to": "chart", "zoom": "z"})
obj = serialize(df, render_to="chart", output_type="dict", kind="bar")
self.assertTrue("chart" in obj)
self.assertEqual(type(obj["chart"]), dict)
self.assertEqual(obj["chart"].get("type"), "column")
self.assertRaises(ValueError, serialize, df, **{"render_to": "chart", "kind": "z"})
obj = serialize(df, render_to="chart", output_type="dict", secondary_y="a")
self.assertTrue(obj.get("yAxis", [])[1].get('opposite'))
obj = serialize(df, render_to="chart", output_type="dict", rot=45, loglog=True)
self.assertEqual(obj.get('xAxis', {}).get('labels'), {'rotation': 45})
self.assertEqual(obj.get('yAxis', [])[0].get('labels'), {'rotation': 45})
self.assertEqual(obj.get('xAxis', {}).get('type'), 'logarithmic')
obj = serialize(df, render_to="chart", output_type="dict", x="t")
self.assertEqual(obj.get('xAxis', {}).get('type'), 'datetime')
obj = serialize(df, render_to="chart", output_type="dict", x="t", style={"a": ":"})
for series in obj.get("series"):
if series["name"] == "a":
self.assertEqual(series.get("dashStyle"), "Dot")
self.assertRaises(ValueError, serialize, df, **{"render_to": "chart", "style": {"a": "u"}})
obj = serialize(df, render_to="chart", output_type="dict", kind="area", stacked=True)
self.assertEqual(obj.get("series")[0].get("stacking"), "normal")
obj = serialize(df, render_to="chart", output_type="dict", grid=True)
self.assertEqual(obj.get('xAxis', {}).get('gridLineDashStyle'), 'Dot')
self.assertEqual(obj.get('xAxis', {}).get('gridLineWidth'), 1)
self.assertEqual(obj.get('yAxis', [])[0].get('gridLineDashStyle'), 'Dot')
self.assertEqual(obj.get('yAxis', [])[0].get('gridLineWidth'), 1)
obj = serialize(df, render_to="chart", output_type="dict", xlim=(0, 1), ylim=(0, 1))
self.assertEqual(obj.get('xAxis', {}).get('min'), 0)
self.assertEqual(obj.get('xAxis', {}).get('max'), 1)
self.assertEqual(obj.get('yAxis', [])[0].get('min'), 0)
self.assertEqual(obj.get('yAxis', [])[0].get('max'), 1)
obj = serialize(df, render_to="chart", output_type="dict", fontsize=12, figsize=(4, 5))
self.assertEqual(obj.get('xAxis', {}).get('labels', {}).get('style', {}).get('fontSize'), 12)
self.assertEqual(obj.get('yAxis', [])[0].get('labels', {}).get('style', {}).get('fontSize'), 12)
obj = serialize(df, render_to="chart", output_type="dict", title='Chart', xticks=[1], yticks=[2])
self.assertTrue(obj.get('title', {}).get('text'))
self.assertTrue(obj.get('xAxis', {}).get('tickPositions'))
for yaxis in obj.get('yAxis', []):
self.assertTrue(yaxis.get('tickPositions'))
obj = serialize(df, render_to="chart", output_type="dict", fontsize=12, kind='pie', x='s', y=['a'], tooltip={'pointFormat': '{series.name}: <b>{point.percentage:.1f}%</b>'})
self.assertTrue(obj.get('tooltip'))
obj = serialize(df, render_to="chart", output_type="dict", polar=True, x='s', y=['a'])
self.assertTrue(obj.get('chart', {}).get('polar'))
df2 = pandas.DataFrame({'s': [2, 1]}, index=['b', 'a'])
obj = serialize(df2, render_to='chart', output_type='dict', sort_columns=True)
self.assertEqual(obj['series'], [{'data': [('a', 1), ('b', 2)], 'name': 's', 'yAxis': 0}])
obj = serialize(df2, render_to='chart', output_type='dict')
self.assertEqual(obj['series'], [{'data': [('b', 2), ('a', 1)], 'name': 's', 'yAxis': 0}])
def test_json_output(self):
json_output = serialize(df, output_type="json")
self.assertEqual(type(json_output), str)
decoded = json.loads(json_output)
self.assertEqual(type(decoded), dict)
def test_jsonencoder(self):
self.assertEqual(json_encode(datetime.date(1970, 1, 1)), "0")
self.assertEqual(json_encode(datetime.date(2015, 1, 1)), "1420070400000")
self.assertEqual(json_encode(datetime.datetime(2015, 1, 1)), "1420070400000")
self.assertEqual(json_encode(pandas.tslib.Timestamp(1420070400000000000)), "1420070400000")
| mit |
orbingol/NURBS-Python | tests/test_visualization.py | 1 | 10925 | """
Tests for the NURBS-Python package
Released under The MIT License. See LICENSE file for details.
Copyright (c) 2018 Onur Rauf Bingol
Tests visualization modules. Requires "pytest" to run.
"""
import os
import pytest
from geomdl import BSpline
from geomdl import multi
from geomdl import operations
import matplotlib
matplotlib.use('agg')
from geomdl.visualization import VisMPL
SAMPLE_SIZE = 5
@pytest.fixture
def bspline_curve2d():
""" Creates a 2-dimensional B-Spline curve instance """
# Create a curve instance
curve = BSpline.Curve()
# Set curve degree
curve.degree = 3
# Set control points
curve.ctrlpts = [[5.0, 5.0], [10.0, 10.0], [20.0, 15.0], [35.0, 15.0], [45.0, 10.0], [50.0, 5.0]]
# Set knot vector
curve.knotvector = [0.0, 0.0, 0.0, 0.0, 0.33, 0.66, 1.0, 1.0, 1.0, 1.0]
# Set sample size
curve.sample_size = SAMPLE_SIZE
# Return the instance
return curve
@pytest.fixture
def bspline_curve3d():
""" Creates a 3-dimensional B-Spline curve instance """
# Create a curve instance
curve = BSpline.Curve()
# Set curve degree
curve.degree = 4
# Set control points
curve.ctrlpts = [[5.0, 15.0, 0.0], [10.0, 25.0, 5.0], [20.0, 20.0, 10.0], [15.0, -5.0, 15.0], [7.5, 10.0, 20.0],
[12.5, 15.0, 25.0], [15.0, 0.0, 30.0], [5.0, -10.0, 35.0], [10.0, 15.0, 40.0], [5.0, 15.0, 30.0]]
# Set knot vector
curve.knotvector = [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0, 1.0, 1.0, 1.0, 1.0]
# Set sample size
curve.sample_size = SAMPLE_SIZE
# Return the instance
return curve
@pytest.fixture
def bspline_surface():
""" Creates a B-Spline surface instance """
# Create a surface instance
surf = BSpline.Surface()
# Set degrees
surf.degree_u = 3
surf.degree_v = 3
# Set control points
surf.ctrlpts_size_u = 6
surf.ctrlpts_size_v = 6
surf.ctrlpts = [[-25.0, -25.0, -10.0], [-25.0, -15.0, -5.0], [-25.0, -5.0, 0.0], [-25.0, 5.0, 0.0],
[-25.0, 15.0, -5.0], [-25.0, 25.0, -10.0], [-15.0, -25.0, -8.0], [-15.0, -15.0, -4.0],
[-15.0, -5.0, -4.0], [-15.0, 5.0, -4.0], [-15.0, 15.0, -4.0], [-15.0, 25.0, -8.0],
[-5.0, -25.0, -5.0], [-5.0, -15.0, -3.0], [-5.0, -5.0, -8.0], [-5.0, 5.0, -8.0],
[-5.0, 15.0, -3.0], [-5.0, 25.0, -5.0], [5.0, -25.0, -3.0], [5.0, -15.0, -2.0],
[5.0, -5.0, -8.0], [5.0, 5.0, -8.0], [5.0, 15.0, -2.0], [5.0, 25.0, -3.0],
[15.0, -25.0, -8.0], [15.0, -15.0, -4.0], [15.0, -5.0, -4.0], [15.0, 5.0, -4.0],
[15.0, 15.0, -4.0], [15.0, 25.0, -8.0], [25.0, -25.0, -10.0], [25.0, -15.0, -5.0],
[25.0, -5.0, 2.0], [25.0, 5.0, 2.0], [25.0, 15.0, -5.0], [25.0, 25.0, -10.0]]
# Set knot vectors
surf.knotvector_u = [0.0, 0.0, 0.0, 0.0, 0.33, 0.66, 1.0, 1.0, 1.0, 1.0]
surf.knotvector_v = [0.0, 0.0, 0.0, 0.0, 0.33, 0.66, 1.0, 1.0, 1.0, 1.0]
# Set sample size
surf.sample_size = SAMPLE_SIZE
# Return the instance
return surf
# Test if plotting a 2-dimensional curve without a window is possible
def test_curve2d_fig_nowindow(bspline_curve2d):
conf = VisMPL.VisConfig()
vis = VisMPL.VisCurve2D(config=conf)
fname = conf.figure_image_filename
bspline_curve2d.vis = vis
bspline_curve2d.render(plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(conf.figure_image_filename):
os.remove(conf.figure_image_filename)
# Test if using a different file name is possible
def test_curve2d_fig_save(bspline_curve2d):
conf = VisMPL.VisConfig()
vis = VisMPL.VisCurve2D(config=conf)
fname = "test-curve.png"
bspline_curve2d.vis = vis
bspline_curve2d.render(filename=fname, plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(fname):
os.remove(fname)
# Test if plotting a 2-dimensional multi-curve without a window is possible
def test_curve2d_multi_fig_nowindow(bspline_curve2d):
conf = VisMPL.VisConfig()
vis = VisMPL.VisCurve2D(config=conf)
fname = conf.figure_image_filename
data = operations.decompose_curve(bspline_curve2d)
multi_shape = multi.CurveContainer(data)
multi_shape.vis = vis
multi_shape.render(plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(conf.figure_image_filename):
os.remove(conf.figure_image_filename)
# Test if using a different file name is possible
def test_curve2d_multi_fig_save(bspline_curve2d):
conf = VisMPL.VisConfig()
vis = VisMPL.VisCurve2D(config=conf)
fname = "test-multi_curve.png"
data = operations.decompose_curve(bspline_curve2d)
multi_shape = multi.CurveContainer(data)
multi_shape.vis = vis
multi_shape.render(filename=fname, plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(fname):
os.remove(fname)
# Test if plotting a 3-dimensional curve without a window is possible
def test_curve3d_fig_nowindow(bspline_curve3d):
conf = VisMPL.VisConfig()
    vis = VisMPL.VisCurve3D(config=conf)
fname = conf.figure_image_filename
bspline_curve3d.vis = vis
bspline_curve3d.render(plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(conf.figure_image_filename):
os.remove(conf.figure_image_filename)
# Test if using a different file name is possible
def test_curve3d_fig_save(bspline_curve3d):
conf = VisMPL.VisConfig()
vis = VisMPL.VisCurve3D(config=conf)
fname = "test-curve.png"
bspline_curve3d.vis = vis
bspline_curve3d.render(filename=fname, plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(fname):
os.remove(fname)
# Test if plotting a 3-dimensional multi-curve without a window is possible
def test_curve3d_multi_fig_nowindow(bspline_curve3d):
conf = VisMPL.VisConfig()
vis = VisMPL.VisCurve3D(config=conf)
data = operations.decompose_curve(bspline_curve3d)
multi_shape = multi.CurveContainer(data)
multi_shape.vis = vis
multi_shape.render(plot=False)
assert os.path.isfile(conf.figure_image_filename)
assert os.path.getsize(conf.figure_image_filename) > 0
# Clean up temporary file if exists
if os.path.isfile(conf.figure_image_filename):
os.remove(conf.figure_image_filename)
# Test if using a different file name is possible
def test_curve3d_multi_fig_save(bspline_curve3d):
conf = VisMPL.VisConfig()
vis = VisMPL.VisCurve3D(config=conf)
fname = "test-multi_curve.png"
data = operations.decompose_curve(bspline_curve3d)
multi_shape = multi.CurveContainer(data)
multi_shape.vis = vis
multi_shape.render(filename=fname, plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(fname):
os.remove(fname)
# Test if plotting a surface without a window is possible
def test_surf_fig_nowindow(bspline_surface):
conf = VisMPL.VisConfig()
vis = VisMPL.VisSurface(config=conf)
fname = conf.figure_image_filename
bspline_surface.vis = vis
bspline_surface.render(plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(conf.figure_image_filename):
os.remove(conf.figure_image_filename)
# Test if using a different file name is possible
def test_surf_fig_save(bspline_surface):
conf = VisMPL.VisConfig()
vis = VisMPL.VisSurface(config=conf)
fname = "test-surface.png"
bspline_surface.vis = vis
bspline_surface.render(filename=fname, plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(fname):
os.remove(fname)
# Test offsetting control points grid
def test_surf_ctrlpts_offset(bspline_surface):
conf = VisMPL.VisConfig()
vis = VisMPL.VisSurface(config=conf)
# Set control points grid offset
vis.ctrlpts_offset = 3.5
fname = "test-surface.png"
bspline_surface.vis = vis
bspline_surface.render(filename=fname, plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(fname):
os.remove(fname)
# Test if plotting a multi-surface without a window is possible
def test_surf_multi_fig_nowindow(bspline_surface):
conf = VisMPL.VisConfig()
vis = VisMPL.VisSurfScatter(config=conf)
fname = conf.figure_image_filename
data = operations.decompose_surface(bspline_surface)
multi_shape = multi.SurfaceContainer(data)
multi_shape.vis = vis
multi_shape.render(plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(conf.figure_image_filename):
os.remove(conf.figure_image_filename)
# Test if using a different file name is possible
def test_surf_multi_fig_save(bspline_surface):
conf = VisMPL.VisConfig()
vis = VisMPL.VisSurfWireframe(config=conf)
fname = "test-multi_surface.png"
data = operations.decompose_surface(bspline_surface)
multi_shape = multi.SurfaceContainer(data)
multi_shape.vis = vis
multi_shape.render(filename=fname, plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(fname):
os.remove(fname)
def test_deriv_curve_fig(bspline_curve2d):
fname = "test-derivative_curve.png"
data = operations.derivative_curve(bspline_curve2d)
multi_shape = multi.CurveContainer(data)
multi_shape.vis = VisMPL.VisCurve2D()
multi_shape.render(filename=fname, plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(fname):
os.remove(fname)
def test_deriv_surf_fig(bspline_surface):
fname = "test-derivative_surface.png"
data = operations.derivative_surface(bspline_surface)
multi_shape = multi.SurfaceContainer(data)
multi_shape.vis = VisMPL.VisSurface()
multi_shape.render(filename=fname, plot=False)
assert os.path.isfile(fname)
assert os.path.getsize(fname) > 0
# Clean up temporary file if exists
if os.path.isfile(fname):
os.remove(fname)
| mit |
CodingCat/mxnet | example/kaggle-ndsb1/gen_img_list.py | 42 | 7000 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import csv
import os
import sys
import random
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='generate train/test image list files from the input directory. If training, it will also split the list into tr and va sets.')
parser.add_argument('--image-folder', type=str, default="data/train/",
help='the input data directory')
parser.add_argument('--out-folder', type=str, default="data/",
help='the output folder')
parser.add_argument('--out-file', type=str, default="train.lst",
help='the output lst file')
parser.add_argument('--train', action='store_true',
                    help='if we are generating the training list, in which case we loop over class subdirectories')
## These options are only used if we are doing training lst
parser.add_argument('--percent-val', type=float, default=0.25,
help='the percentage of training list to use as validation')
parser.add_argument('--stratified', action='store_true',
help='if True it will split train lst into tr and va sets using stratified sampling')
args = parser.parse_args()
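# Example invocation (paths are illustrative, matching the defaults above):
#   python gen_img_list.py --image-folder data/train/ --out-folder data/ \
#       --out-file train.lst --train --stratified --percent-val 0.25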
random.seed(888)
fo_name=os.path.join(args.out_folder+args.out_file)
fo = csv.writer(open(fo_name, "w"), delimiter='\t', lineterminator='\n')
if args.train:
tr_fo_name=os.path.join(args.out_folder+"tr.lst")
va_fo_name=os.path.join(args.out_folder+"va.lst")
tr_fo = csv.writer(open(tr_fo_name, "w"), delimiter='\t', lineterminator='\n')
va_fo = csv.writer(open(va_fo_name, "w"), delimiter='\t', lineterminator='\n')
#check sampleSubmission.csv from kaggle website to view submission format
head = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# make image list
img_lst = []
cnt = 0
if args.train:
for i in xrange(len(head)):
path = args.image_folder + head[i]
lst = os.listdir(args.image_folder + head[i])
for img in lst:
img_lst.append((cnt, i, path + '/' + img))
cnt += 1
else:
lst = os.listdir(args.image_folder)
for img in lst:
img_lst.append((cnt, 0, args.image_folder + img))
cnt += 1
# shuffle
random.shuffle(img_lst)
#write
for item in img_lst:
fo.writerow(item)
## If training, split into train and validation lists (tr.lst and va.lst)
## Optional stratified sampling
if args.train:
img_lst=np.array(img_lst)
if args.stratified:
from sklearn.cross_validation import StratifiedShuffleSplit
## Stratified sampling to generate train and validation sets
labels_train=img_lst[:,1]
        # unique_train, counts_train = np.unique(labels_train, return_counts=True)  # To inspect the class frequency distribution
sss = StratifiedShuffleSplit(labels_train, 1, test_size=args.percent_val, random_state=0)
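        # Iterating over the (single) split yields (train_indices, validation_indices)
        # index arrays that preserve the per-class proportions of labels_train.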
for tr_idx, va_idx in sss:
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
else:
(nRows, nCols) = img_lst.shape
splitat=int(round(nRows*(1-args.percent_val),0))
tr_idx=range(0,splitat)
va_idx=range(splitat,nRows)
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
tr_lst=img_lst[tr_idx,:].tolist()
va_lst=img_lst[va_idx,:].tolist()
for item in tr_lst:
tr_fo.writerow(item)
for item in va_lst:
va_fo.writerow(item)
| apache-2.0 |
RayMick/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
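# Example invocation (only flags defined above are used):
#   python document_classification_20newsgroups.py --report --chi2_select=1000 --top10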
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
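# Each benchmark() call returns (classifier name, accuracy, train time, test time);
# these tuples are collected in `results` and visualized in the bar chart below.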
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
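# For example, LinearSVC(C=0.01, penalty="l1", dual=False) would typically zero out
# far more coefficients than the default C=1.0 used in the pipeline below.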
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
google/lasr | render_vis.py | 1 | 13780 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
os.environ["PYOPENGL_PLATFORM"] = "egl" #opengl seems to only work with TPU
sys.path.insert(0,'third_party')
import subprocess
import imageio
import glob
from ext_utils.badja_data import BADJAData
from ext_utils.joint_catalog import SMALJointInfo
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
import pdb
import soft_renderer as sr
import argparse
import trimesh
from nnutils.geom_utils import obj_to_cam, pinhole_cam
import pyrender
from pyrender import IntrinsicsCamera,Mesh, Node, Scene,OffscreenRenderer
import configparser
parser = argparse.ArgumentParser(description='render mesh')
parser.add_argument('--testdir', default='',
help='path to test dir')
parser.add_argument('--seqname', default='camel',
help='sequence to test')
parser.add_argument('--watertight', default='no',
help='watertight remesh')
parser.add_argument('--outpath', default='/data/gengshay/output.gif',
help='output path')
parser.add_argument('--cam_type', default='perspective',
help='camera model, orthographic or perspective')
parser.add_argument('--append_img', default='no',
help='whether append images before the seq')
parser.add_argument('--append_render', default='yes',
help='whether append renderings')
parser.add_argument('--nosmooth', dest='smooth', action='store_false',
help='whether to smooth vertex colors and positions')
parser.add_argument('--gray', dest='gray', action='store_true',
help='whether to render with gray texture')
parser.add_argument('--overlay', dest='overlay',action='store_true',
help='whether to overlay with the input')
parser.add_argument('--vis_bones', dest='vis_bones',action='store_true',
help='whether show transparent surface and vis bones')
parser.add_argument('--freeze', dest='freeze', action='store_true',
help='freeze object at frist frame')
args = parser.parse_args()
renderer_softflf = sr.SoftRenderer(image_size=256,dist_func='hard' ,aggr_func_alpha='hard',
camera_mode='look_at',perspective=False, aggr_func_rgb='hard',
light_mode='vertex', light_intensity_ambient=1.,light_intensity_directionals=0.)
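# SoftRas renderer configured for hard rasterization and aggregation with an
# orthographic look-at camera (perspective=False); only its rasterizer image size
# is adjusted later, right before rendering.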
def preprocess_image(img,mask,imgsize):
if len(img.shape) == 2:
img = np.repeat(np.expand_dims(img, 2), 3, axis=2)
if mask.shape[0]!=img.shape[0] or mask.shape[1]!=img.shape[1]:
mask = cv2.resize(mask, img.shape[:2][::-1],interpolation=cv2.INTER_NEAREST)[:,:,None]
mask = mask[:,:,:1]
# crop box
indices = np.where(mask>0); xid = indices[1]; yid = indices[0]
center = ( (xid.max()+xid.min())//2, (yid.max()+yid.min())//2)
length = ( (xid.max()-xid.min())//2, (yid.max()-yid.min())//2)
maxlength = int(1.2*max(length))
length = (maxlength,maxlength)
alp = 2*length[0]/float(imgsize)
refpp = np.asarray(center)/(imgsize/2.) - 1
return alp, refpp,center,length[0]
def draw_joints_on_image(rgb_img, joints, visibility, region_colors, marker_types):
joints = joints[:, ::-1] # OpenCV works in (x, y) rather than (i, j)
disp_img = rgb_img.copy()
for joint_coord, visible, color, marker_type in zip(joints, visibility, region_colors, marker_types):
if visible:
joint_coord = joint_coord.astype(int)
cv2.drawMarker(disp_img, tuple(joint_coord), color.tolist(), marker_type, 30, thickness = 10)
return disp_img
def remesh(mesh):
mesh.export('tmp/input.obj')
print(subprocess.check_output(['./Manifold/build/manifold', 'tmp/input.obj', 'tmp/output.obj', '10000']))
mesh = trimesh.load('tmp/output.obj',process=False)
return mesh
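# remesh() shells out to the external Manifold binary ('./Manifold/build/manifold')
# to produce a watertight remesh of the input mesh and reloads the result with
# trimesh; the trailing '10000' argument is simply forwarded to the tool.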
def main():
print(args.testdir)
# store all the data
all_anno = []
all_mesh = []
all_bone = []
all_cam = []
all_fr = []
config = configparser.RawConfigParser()
config.read('configs/%s.config'%args.seqname)
datapath = str(config.get('data', 'datapath'))
init_frame = int(config.get('data', 'init_frame'))
end_frame = int(config.get('data', 'end_frame'))
dframe = int(config.get('data', 'dframe'))
for name in sorted(glob.glob('%s/*'%datapath))[init_frame:end_frame][::dframe]:
rgb_img = cv2.imread(name)
sil_img = cv2.imread(name.replace('JPEGImages', 'Annotations').replace('.jpg', '.png'),0)[:,:,None]
all_anno.append([rgb_img,sil_img,0,0,name])
seqname = name.split('/')[-2]
fr = int(name.split('/')[-1].split('.')[-2])
all_fr.append(fr)
print('%s/%d'%(seqname, fr))
try:
try:
mesh = trimesh.load('%s/pred%d.ply'%(args.testdir, fr),process=False)
except:
mesh = trimesh.load('%s/pred%d.obj'%(args.testdir, fr),process=False)
trimesh.repair.fix_inversion(mesh)
if args.watertight=='yes':
mesh = remesh(mesh)
if args.gray:
mesh.visual.vertex_colors[:,:3]=64
if args.overlay:
mesh.visual.vertex_colors[:,:2]=0
mesh.visual.vertex_colors[:,2]=255
all_mesh.append(mesh)
cam = np.loadtxt('%s/cam%d.txt'%(args.testdir,fr))
all_cam.append(cam)
all_bone.append(trimesh.load('%s/gauss%d.ply'%(args.testdir, fr),process=False))
except: print('no mesh found')
# add bones?
num_original_verts = []
num_original_faces = []
if args.vis_bones:
for i in range(len(all_mesh)):
all_mesh[i].visual.vertex_colors[:,-1]=192 # alpha
num_original_verts.append( all_mesh[i].vertices.shape[0])
num_original_faces.append( all_mesh[i].faces.shape[0] )
all_mesh[i] = trimesh.util.concatenate([all_mesh[i], all_bone[i]])
# store all the results
input_size = all_anno[0][0].shape[:2]
output_size = (int(input_size[0] * 480/input_size[1]), 480)# 270x480
frames=[]
if args.append_img=="yes":
if args.append_render=='yes':
if args.freeze: napp_fr = 30
else: napp_fr = int(len(all_anno)//5)
for i in range(napp_fr):
frames.append(cv2.resize(all_anno[0][0],output_size[::-1])[:,:,::-1])
else:
for i in range(len(all_anno)):
#frames.append(cv2.resize(all_anno[i][1],output_size[::-1])*255) # silhouette
frames.append(cv2.resize(all_anno[i][0],output_size[::-1])[:,:,::-1]) # frame
#strx = sorted(glob.glob('%s/*'%datapath))[init_frame:end_frame][::dframe][i]# flow
#strx = strx.replace('JPEGImages', 'FlowBW')
#flowimg = cv2.imread('%s/vis-%s'%(strx.rsplit('/',1)[0],strx.rsplit('/',1)[1]))
#frames.append(cv2.resize(flowimg,output_size[::-1])[:,:,::-1])
theta = 7*np.pi/9
light_pose = np.asarray([[1,0,0,0],[0,np.cos(theta),-np.sin(theta),0],[0,np.sin(theta),np.cos(theta),0],[0,0,0,1]])
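    # light_pose is a homogeneous rotation about the x-axis by theta = 7*pi/9,
    # used below as the pose of the directional light node.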
if args.freeze:
size = 150
else:
size = len(all_anno)
for i in range(size):
if args.append_render=='no':break
# render flow between mesh 1 and 2
if args.freeze:
print(i)
refimg = all_anno[0][0]
img_size = max(refimg.shape)
refmesh = all_mesh[0]
refmesh.vertices -= refmesh.vertices.mean(0)[None]
refmesh.vertices /= 1.2*np.abs(refmesh.vertices).max()
refcam = all_cam[0].copy()
refcam[:3,:3] = refcam[:3,:3].dot(cv2.Rodrigues(np.asarray([0.,-i*2*np.pi/size,0.]))[0])
refcam[:2,3] = 0 # trans xy
refcam[2,3] = 20 # depth
if args.cam_type=='perspective':
refcam[3,2] = refimg.shape[1]/2 # px py
refcam[3,3] = refimg.shape[0]/2 # px py
refcam[3,:2] = 8*img_size/2 # fl
else:
refcam[3,2] = refimg.shape[1]/2 # px py
refcam[3,3] = refimg.shape[1]/2 # px py
refcam[3,:2] =0.5 * img_size/2 # fl
else:
refimg, refsil, refkp, refvis, refname = all_anno[i]
print('%s'%(refname))
img_size = max(refimg.shape)
renderer_softflf.rasterizer.image_size = img_size
refmesh = all_mesh[i]
refcam = all_cam[i]
currcam = np.concatenate([refcam[:3,:4],np.asarray([[0,0,0,1]])],0)
if i==0:
initcam = currcam.copy()
refface = torch.Tensor(refmesh.faces[None]).cuda()
verts = torch.Tensor(refmesh.vertices[None]).cuda()
Rmat = torch.Tensor(refcam[None,:3,:3]).cuda()
Tmat = torch.Tensor(refcam[None,:3,3]).cuda()
ppoint =refcam[3,2:4]
scale = refcam[3,0]
verts = obj_to_cam(verts, Rmat, Tmat,nmesh=1,n_hypo=1,skin=None)
if args.cam_type != 'perspective':
verts[:,:,1] = ppoint[1]+verts[:,:, 1]*scale[0]
verts[:,:,0] = ppoint[0]+verts[:,:, 0]*scale[0]
verts[:,:,2] += (5+verts[:,:,2].min())
r = OffscreenRenderer(img_size, img_size)
if args.overlay:
bgcolor=[0., 0., 0.]
else:
bgcolor=[1.,1.,1.]
scene = Scene(ambient_light=0.4*np.asarray([1.,1.,1.,1.]), bg_color=bgcolor)
direc_l = pyrender.DirectionalLight(color=np.ones(3), intensity=6.0)
colors = refmesh.visual.vertex_colors
colors= np.concatenate([0.6*colors[:,:3].astype(np.uint8), colors[:,3:]],-1) # avoid overexposure
smooth=args.smooth
if args.freeze:
tbone = 0
else:
tbone = i
if args.vis_bones:
mesh = trimesh.Trimesh(vertices=np.asarray(verts[0,:num_original_verts[tbone],:3].cpu()), faces=np.asarray(refface[0,:num_original_faces[tbone]].cpu()),vertex_colors=colors[:num_original_verts[tbone]])
meshr = Mesh.from_trimesh(mesh,smooth=smooth)
meshr._primitives[0].material.RoughnessFactor=.5
scene.add_node( Node(mesh=meshr ))
mesh2 = trimesh.Trimesh(vertices=np.asarray(verts[0,num_original_verts[tbone]:,:3].cpu()), faces=np.asarray(refface[0,num_original_faces[tbone]:].cpu()-num_original_verts[tbone]),vertex_colors=colors[num_original_verts[tbone]:])
mesh2=Mesh.from_trimesh(mesh2,smooth=smooth)
mesh2._primitives[0].material.RoughnessFactor=.5
scene.add_node( Node(mesh=mesh2))
else:
mesh = trimesh.Trimesh(vertices=np.asarray(verts[0,:,:3].cpu()), faces=np.asarray(refface[0].cpu()),vertex_colors=colors)
meshr = Mesh.from_trimesh(mesh,smooth=smooth)
meshr._primitives[0].material.RoughnessFactor=.5
scene.add_node( Node(mesh=meshr ))
if not args.overlay:
floor_mesh = trimesh.load('./database/misc/wood.obj',process=False)
floor_mesh.vertices = np.concatenate([floor_mesh.vertices[:,:1], floor_mesh.vertices[:,2:3], floor_mesh.vertices[:,1:2]],-1 )
xfloor = 10*mesh.vertices[:,0].min() + (10*mesh.vertices[:,0].max()-10*mesh.vertices[:,0].min())*(floor_mesh.vertices[:,0:1] - floor_mesh.vertices[:,0].min())/(floor_mesh.vertices[:,0].max()-floor_mesh.vertices[:,0].min())
yfloor = floor_mesh.vertices[:,1:2]; yfloor[:] = (mesh.vertices[:,1].max())
zfloor = 0.5*mesh.vertices[:,2].min() + (10*mesh.vertices[:,2].max()-0.5*mesh.vertices[:,2].min())*(floor_mesh.vertices[:,2:3] - floor_mesh.vertices[:,2].min())/(floor_mesh.vertices[:,2].max()-floor_mesh.vertices[:,2].min())
floor_mesh.vertices = np.concatenate([xfloor,yfloor,zfloor],-1)
floor_mesh = trimesh.Trimesh(floor_mesh.vertices, floor_mesh.faces, vertex_colors=255*np.ones((4,4), dtype=np.uint8))
scene.add_node( Node(mesh=Mesh.from_trimesh(floor_mesh))) # overrides the prev. one
if args.cam_type=='perspective':
cam = IntrinsicsCamera(
scale,
scale,
ppoint[0],
ppoint[1],
znear=1e-3,zfar=1000)
else:
cam = pyrender.OrthographicCamera(xmag=1., ymag=1.)
cam_pose = -np.eye(4); cam_pose[0,0]=1; cam_pose[-1,-1]=1
cam_node = scene.add(cam, pose=cam_pose)
direc_l_node = scene.add(direc_l, pose=light_pose)
if args.vis_bones:
color, depth = r.render(scene,flags=pyrender.RenderFlags.SHADOWS_DIRECTIONAL)
else:
color, depth = r.render(scene,flags=pyrender.RenderFlags.SHADOWS_DIRECTIONAL | pyrender.RenderFlags.SKIP_CULL_FACES)
r.delete()
color = color[:refimg.shape[0],:refimg.shape[1],:3]
if args.overlay:
color = cv2.addWeighted(color, 0.5, refimg[:,:,::-1], 0.5, 0)
color = cv2.resize(color, output_size[::-1])
frames.append(color)
imageio.mimsave('%s'%args.outpath, frames, duration=5./len(frames))
if __name__ == '__main__':
main()
| apache-2.0 |
cloud-fan/spark | python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby.py | 14 | 24000 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class OpsOnDiffFramesGroupByTest(PandasOnSparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
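        # Enabling this option allows binary operations (and groupby keys) across
        # Series/DataFrames that are not anchored to the same underlying DataFrame,
        # which is exactly what these tests exercise.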
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
def test_groupby_different_lengths(self):
pdfs1 = [
pd.DataFrame({"c": [4, 2, 7, 3, None, 1, 1, 1, 2], "d": list("abcdefght")}),
pd.DataFrame({"c": [4, 2, 7, None, 1, 1, 2], "d": list("abcdefg")}),
pd.DataFrame({"c": [4, 2, 7, 3, None, 1, 1, 1, 2, 2], "d": list("abcdefghti")}),
]
pdfs2 = [
pd.DataFrame({"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]}),
pd.DataFrame({"a": [1, 2, 6, 4, 4, 6, 4, 7], "b": [4, 2, 7, 3, 3, 1, 1, 2]}),
pd.DataFrame({"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]}),
]
for pdf1, pdf2 in zip(pdfs1, pdfs2):
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("c").reset_index(drop=True)
self.assert_eq(
sort(psdf1.groupby(psdf2.a, as_index=as_index).sum()),
sort(pdf1.groupby(pdf2.a, as_index=as_index).sum()),
almost=as_index,
)
self.assert_eq(
sort(psdf1.groupby(psdf2.a, as_index=as_index).c.sum()),
sort(pdf1.groupby(pdf2.a, as_index=as_index).c.sum()),
almost=as_index,
)
self.assert_eq(
sort(psdf1.groupby(psdf2.a, as_index=as_index)["c"].sum()),
sort(pdf1.groupby(pdf2.a, as_index=as_index)["c"].sum()),
almost=as_index,
)
def test_groupby_multiindex_columns(self):
pdf1 = pd.DataFrame(
{("y", "c"): [4, 2, 7, 3, None, 1, 1, 1, 2], ("z", "d"): list("abcdefght")}
)
pdf2 = pd.DataFrame(
{("x", "a"): [1, 2, 6, 4, 4, 6, 4, 3, 7], ("x", "b"): [4, 2, 7, 3, 3, 1, 1, 1, 2]}
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
psdf1.groupby(psdf2[("x", "a")]).sum().sort_index(),
pdf1.groupby(pdf2[("x", "a")]).sum().sort_index(),
)
self.assert_eq(
psdf1.groupby(psdf2[("x", "a")], as_index=False)
.sum()
.sort_values(("y", "c"))
.reset_index(drop=True),
pdf1.groupby(pdf2[("x", "a")], as_index=False)
.sum()
.sort_values(("y", "c"))
.reset_index(drop=True),
)
self.assert_eq(
psdf1.groupby(psdf2[("x", "a")])[[("y", "c")]].sum().sort_index(),
pdf1.groupby(pdf2[("x", "a")])[[("y", "c")]].sum().sort_index(),
)
def test_split_apply_combine_on_series(self):
pdf1 = pd.DataFrame({"C": [0.362, 0.227, 1.267, -0.562], "B": [1, 2, 3, 4]})
pdf2 = pd.DataFrame({"A": [1, 1, 2, 2]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
with self.subTest(as_index=as_index):
self.assert_eq(
sort(psdf1.groupby(psdf2.A, as_index=as_index).sum()),
sort(pdf1.groupby(pdf2.A, as_index=as_index).sum()),
)
self.assert_eq(
sort(psdf1.groupby(psdf2.A, as_index=as_index).B.sum()),
sort(pdf1.groupby(pdf2.A, as_index=as_index).B.sum()),
)
self.assert_eq(
sort(psdf1.groupby([psdf1.C, psdf2.A], as_index=as_index).sum()),
sort(pdf1.groupby([pdf1.C, pdf2.A], as_index=as_index).sum()),
)
self.assert_eq(
sort(psdf1.groupby([psdf1.C + 1, psdf2.A], as_index=as_index).sum()),
sort(pdf1.groupby([pdf1.C + 1, pdf2.A], as_index=as_index).sum()),
)
self.assert_eq(
psdf1.B.groupby(psdf2.A).sum().sort_index(),
pdf1.B.groupby(pdf2.A).sum().sort_index(),
)
self.assert_eq(
(psdf1.B + 1).groupby(psdf2.A).sum().sort_index(),
(pdf1.B + 1).groupby(pdf2.A).sum().sort_index(),
)
self.assert_eq(
psdf1.B.groupby(psdf2.A.rename()).sum().sort_index(),
pdf1.B.groupby(pdf2.A.rename()).sum().sort_index(),
)
self.assert_eq(
psdf1.B.rename().groupby(psdf2.A).sum().sort_index(),
pdf1.B.rename().groupby(pdf2.A).sum().sort_index(),
)
self.assert_eq(
psdf1.B.rename().groupby(psdf2.A.rename()).sum().sort_index(),
pdf1.B.rename().groupby(pdf2.A.rename()).sum().sort_index(),
)
def test_aggregate(self):
pdf1 = pd.DataFrame({"C": [0.362, 0.227, 1.267, -0.562], "B": [1, 2, 3, 4]})
pdf2 = pd.DataFrame({"A": [1, 1, 2, 2]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
with self.subTest(as_index=as_index):
self.assert_eq(
sort(psdf1.groupby(psdf2.A, as_index=as_index).agg("sum")),
sort(pdf1.groupby(pdf2.A, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(psdf1.groupby(psdf2.A, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf1.groupby(pdf2.A, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
psdf1.groupby(psdf2.A, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf1.groupby(pdf2.A, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
self.assert_eq(
sort(psdf1.groupby([psdf1.C, psdf2.A], as_index=as_index).agg("sum")),
sort(pdf1.groupby([pdf1.C, pdf2.A], as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(psdf1.groupby([psdf1.C + 1, psdf2.A], as_index=as_index).agg("sum")),
sort(pdf1.groupby([pdf1.C + 1, pdf2.A], as_index=as_index).agg("sum")),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("Y", "C"), ("X", "B")])
pdf1.columns = columns
psdf1.columns = columns
columns = pd.MultiIndex.from_tuples([("X", "A")])
pdf2.columns = columns
psdf2.columns = columns
for as_index in [True, False]:
stats_psdf = psdf1.groupby(psdf2[("X", "A")], as_index=as_index).agg(
{("X", "B"): "min", ("Y", "C"): "sum"}
)
stats_pdf = pdf1.groupby(pdf2[("X", "A")], as_index=as_index).agg(
{("X", "B"): "min", ("Y", "C"): "sum"}
)
self.assert_eq(
stats_psdf.sort_values(by=[("X", "B"), ("Y", "C")]).reset_index(drop=True),
stats_pdf.sort_values(by=[("X", "B"), ("Y", "C")]).reset_index(drop=True),
)
stats_psdf = psdf1.groupby(psdf2[("X", "A")]).agg(
{("X", "B"): ["min", "max"], ("Y", "C"): "sum"}
)
stats_pdf = pdf1.groupby(pdf2[("X", "A")]).agg(
{("X", "B"): ["min", "max"], ("Y", "C"): "sum"}
)
self.assert_eq(
stats_psdf.sort_values(
by=[("X", "B", "min"), ("X", "B", "max"), ("Y", "C", "sum")]
).reset_index(drop=True),
stats_pdf.sort_values(
by=[("X", "B", "min"), ("X", "B", "max"), ("Y", "C", "sum")]
).reset_index(drop=True),
)
def test_duplicated_labels(self):
pdf1 = pd.DataFrame({"A": [3, 2, 1]})
pdf2 = pd.DataFrame({"A": [1, 2, 3]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
psdf1.groupby(psdf2.A).sum().sort_index(), pdf1.groupby(pdf2.A).sum().sort_index()
)
self.assert_eq(
psdf1.groupby(psdf2.A, as_index=False).sum().sort_values("A").reset_index(drop=True),
pdf1.groupby(pdf2.A, as_index=False).sum().sort_values("A").reset_index(drop=True),
)
def test_apply(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
pkey = pd.Series([1, 1, 2, 3, 5, 8])
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pkey).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)["a"].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pkey)["a"].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pkey)[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", kkey]).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(["a", pkey]).apply(lambda x: x + x.min()).sort_index(),
)
def test_transform(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
pkey = pd.Series([1, 1, 2, 3, 5, 8])
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pkey).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pkey)["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pkey)[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", kkey]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["a", pkey]).transform(lambda x: x + x.min()).sort_index(),
)
def test_filter(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
pkey = pd.Series([1, 1, 2, 3, 5, 8])
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pkey).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby(pkey)["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pkey)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", kkey]).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(["a", pkey]).filter(lambda x: any(x.a == 2)).sort_index(),
)
def test_head(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
)
pkey = pd.Series([1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
pdf.groupby(pkey).head(2).sort_index(), psdf.groupby(kkey).head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), psdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(2).sort_index(),
psdf.groupby("a")[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby([pkey, "b"]).head(2).sort_index(),
psdf.groupby([kkey, "b"]).head(2).sort_index(),
)
def test_cumcount(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
)
pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
for ascending in [True, False]:
self.assert_eq(
psdf.groupby(kkey).cumcount(ascending=ascending).sort_index(),
pdf.groupby(pkey).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(pkey)["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].cumcount(ascending=ascending).sort_index(),
pdf.groupby(pkey)[["a"]].cumcount(ascending=ascending).sort_index(),
)
def test_cummin(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
)
pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).cummin().sort_index(), pdf.groupby(pkey).cummin().sort_index()
)
self.assert_eq(
psdf.groupby(kkey)["a"].cummin().sort_index(),
pdf.groupby(pkey)["a"].cummin().sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].cummin().sort_index(),
pdf.groupby(pkey)[["a"]].cummin().sort_index(),
)
def test_cummax(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
)
pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).cummax().sort_index(), pdf.groupby(pkey).cummax().sort_index()
)
self.assert_eq(
psdf.groupby(kkey)["a"].cummax().sort_index(),
pdf.groupby(pkey)["a"].cummax().sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].cummax().sort_index(),
pdf.groupby(pkey)[["a"]].cummax().sort_index(),
)
def test_cumsum(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
)
pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).cumsum().sort_index(), pdf.groupby(pkey).cumsum().sort_index()
)
self.assert_eq(
psdf.groupby(kkey)["a"].cumsum().sort_index(),
pdf.groupby(pkey)["a"].cumsum().sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].cumsum().sort_index(),
pdf.groupby(pkey)[["a"]].cumsum().sort_index(),
)
def test_cumprod(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
)
pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).cumprod().sort_index(),
pdf.groupby(pkey).cumprod().sort_index(),
almost=True,
)
self.assert_eq(
psdf.groupby(kkey)["a"].cumprod().sort_index(),
pdf.groupby(pkey)["a"].cumprod().sort_index(),
almost=True,
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].cumprod().sort_index(),
pdf.groupby(pkey)[["a"]].cumprod().sort_index(),
almost=True,
)
def test_diff(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
}
)
pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).diff().sort_index(), pdf.groupby(pkey).diff().sort_index()
)
self.assert_eq(
psdf.groupby(kkey)["a"].diff().sort_index(), pdf.groupby(pkey)["a"].diff().sort_index()
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].diff().sort_index(),
pdf.groupby(pkey)[["a"]].diff().sort_index(),
)
self.assert_eq(psdf.groupby(kkey).diff().sum(), pdf.groupby(pkey).diff().sum().astype(int))
self.assert_eq(psdf.groupby(kkey)["a"].diff().sum(), pdf.groupby(pkey)["a"].diff().sum())
def test_rank(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
)
pkey = pd.Series([1, 1, 2, 3, 5, 8] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).rank().sort_index(), pdf.groupby(pkey).rank().sort_index()
)
self.assert_eq(
psdf.groupby(kkey)["a"].rank().sort_index(), pdf.groupby(pkey)["a"].rank().sort_index()
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].rank().sort_index(),
pdf.groupby(pkey)[["a"]].rank().sort_index(),
)
self.assert_eq(psdf.groupby(kkey).rank().sum(), pdf.groupby(pkey).rank().sum())
self.assert_eq(psdf.groupby(kkey)["a"].rank().sum(), pdf.groupby(pkey)["a"].rank().sum())
@unittest.skipIf(pd.__version__ < "0.24.0", "not supported before pandas 0.24.0")
def test_shift(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 2, 2, 3, 3] * 3,
"b": [1, 1, 2, 2, 3, 4] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
)
pkey = pd.Series([1, 1, 2, 2, 3, 4] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).shift().sort_index(), pdf.groupby(pkey).shift().sort_index()
)
self.assert_eq(
psdf.groupby(kkey)["a"].shift().sort_index(),
pdf.groupby(pkey)["a"].shift().sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["a"]].shift().sort_index(),
pdf.groupby(pkey)[["a"]].shift().sort_index(),
)
self.assert_eq(
psdf.groupby(kkey).shift().sum(), pdf.groupby(pkey).shift().sum().astype(int)
)
self.assert_eq(psdf.groupby(kkey)["a"].shift().sum(), pdf.groupby(pkey)["a"].shift().sum())
def test_fillna(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
}
)
pkey = pd.Series([1, 1, 2, 2] * 3)
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
psdf.groupby(kkey).fillna(0).sort_index(), pdf.groupby(pkey).fillna(0).sort_index()
)
self.assert_eq(
psdf.groupby(kkey)["C"].fillna(0).sort_index(),
pdf.groupby(pkey)["C"].fillna(0).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["C"]].fillna(0).sort_index(),
pdf.groupby(pkey)[["C"]].fillna(0).sort_index(),
)
self.assert_eq(
psdf.groupby(kkey).fillna(method="bfill").sort_index(),
pdf.groupby(pkey).fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)["C"].fillna(method="bfill").sort_index(),
pdf.groupby(pkey)["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby(pkey)[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
psdf.groupby(kkey).fillna(method="ffill").sort_index(),
pdf.groupby(pkey).fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)["C"].fillna(method="ffill").sort_index(),
pdf.groupby(pkey)["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
psdf.groupby(kkey)[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby(pkey)[["C"]].fillna(method="ffill").sort_index(),
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_ops_on_diff_frames_groupby import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
tylerjereddy/scipy | doc/source/tutorial/stats/plots/qmc_plot_conv_mc.py | 12 | 1704 | """Integration convergence.
The function is a synthetic example specifically designed
to verify the correctness of the implementation [1]_.
References
----------
.. [1] Art B. Owen. On dropping the first Sobol' point. arXiv 2008.08051,
2020.
"""
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
n_conv = 99
ns_gen = 2 ** np.arange(4, 13) # 13
def art_2(sample):
    # dim 5, true value E[(sum_i x_i)^2] = d/3 + d*(d - 1)/4 = 5/3 + 5*(5 - 1)/4
return np.sum(sample, axis=1) ** 2
functions = namedtuple('functions', ['name', 'func', 'dim', 'ref'])
case = functions('Art 2', art_2, 5, 5 / 3 + 5 * (5 - 1) / 4)
def conv_method(sampler, func, n_samples, n_conv, ref):
samples = [sampler(n_samples) for _ in range(n_conv)]
samples = np.array(samples)
evals = [np.sum(func(sample)) / n_samples for sample in samples]
squared_errors = (ref - np.array(evals)) ** 2
rmse = (np.sum(squared_errors) / n_conv) ** 0.5
return rmse
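# conv_method estimates the root-mean-square integration error over n_conv
# independent replications; plain Monte Carlo is expected to converge at the
# O(N^-1/2) rate, which is the reference line drawn in the plot below.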
# Analysis
sample_mc_rmse = []
rng = np.random.default_rng()
for ns in ns_gen:
# Monte Carlo
sampler_mc = lambda x: rng.random((x, case.dim))
conv_res = conv_method(sampler_mc, case.func, ns, n_conv, case.ref)
sample_mc_rmse.append(conv_res)
sample_mc_rmse = np.array(sample_mc_rmse)
# Plot
fig, ax = plt.subplots(figsize=(5, 3))
ax.set_aspect('equal')
ratio = sample_mc_rmse[0] / ns_gen[0] ** (-1 / 2)
ax.plot(ns_gen, ns_gen ** (-1 / 2) * ratio, ls='-', c='k')
ax.scatter(ns_gen, sample_mc_rmse)
ax.set_xlabel(r'$N_s$')
ax.set_xscale('log')
ax.set_xticks(ns_gen)
ax.set_xticklabels([fr'$2^{{{ns}}}$' for ns in np.arange(4, 13)])
ax.set_ylabel(r'$\log (\epsilon)$')
ax.set_yscale('log')
fig.tight_layout()
plt.show()
| bsd-3-clause |
jmuhlich/pysb | pysb/examples/cupsoda/run_michment_cupsoda.py | 5 | 1878 | from pysb.examples.michment import model
from pysb.simulator.cupsoda import run_cupsoda
import numpy as np
import matplotlib.pyplot as plt
import itertools
def run():
# factors to multiply the values of the initial conditions
multipliers = np.linspace(0.8, 1.2, 11)
# 2D array of initial concentrations
initial_concentrations = [
multipliers * ic.value.value for ic in model.initials
]
# Cartesian product of initial concentrations
cartesian_product = itertools.product(*initial_concentrations)
# the Cartesian product object must be cast to a list, then to a numpy array
# and transposed to give a (n_species x n_vals) matrix of initial concentrations
initials_matrix = np.array(list(cartesian_product)).T
# we can now construct the initials dictionary
initials = {
ic.pattern: initials_matrix[i] for i, ic in enumerate(model.initials)
}
# simulation time span and output points
tspan = np.linspace(0, 50, 501)
# run_cupsoda returns a 3D array of species and observables trajectories
trajectories = run_cupsoda(model, tspan, initials=initials,
integrator_options={'atol': 1e-10, 'rtol': 1e-4},
verbose=True)
# extract the trajectories for the 'Product' into a numpy array and
# transpose to aid in plotting
x = np.array([tr['Product'] for tr in trajectories]).T
# plot the mean, minimum, and maximum concentrations at each time point
plt.plot(tspan, x.mean(axis=1), 'b', lw=3, label="Product")
plt.plot(tspan, x.max(axis=1), 'b--', lw=2, label="min/max")
plt.plot(tspan, x.min(axis=1), 'b--', lw=2)
# define the axis labels and legend
plt.xlabel('time')
plt.ylabel('concentration')
plt.legend(loc='upper left')
# show the plot
plt.show()
if __name__ == '__main__':
run()
| bsd-2-clause |
wangxianliang/facenet | tmp/test_align.py | 4 | 1503 | import facenet
import os
import matplotlib.pyplot as plt
import numpy as np
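# Compares two versions of an aligned face dataset image-by-image: counts exact
# matches, accumulates squared pixel errors, and displays pairs whose error
# exceeds a threshold for visual inspection.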
def main():
image_size = 96
old_dataset = '/home/david/datasets/facescrub/fs_aligned_new_oean/'
new_dataset = '/home/david/datasets/facescrub/facescrub_110_96/'
eq = 0
num = 0
l = []
dataset = facenet.get_dataset(old_dataset)
for cls in dataset:
new_class_dir = os.path.join(new_dataset, cls.name)
for image_path in cls.image_paths:
try:
filename = os.path.splitext(os.path.split(image_path)[1])[0]
new_filename = os.path.join(new_class_dir, filename+'.png')
#print(image_path)
if os.path.exists(new_filename):
a = facenet.load_data([image_path, new_filename], False, False, image_size, do_prewhiten=False)
if np.array_equal(a[0], a[1]):
eq+=1
num+=1
err = np.sum(np.square(np.subtract(a[0], a[1])))
#print(err)
l.append(err)
if err>2000:
fig = plt.figure(1)
p1 = fig.add_subplot(121)
p1.imshow(a[0])
p2 = fig.add_subplot(122)
p2.imshow(a[1])
print('%6.1f: %s\n' % (err, new_filename))
pass
else:
pass
#print('File not found: %s' % new_filename)
except:
pass
if __name__ == '__main__':
main()
| mit |
bigdataelephants/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
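# The download tests below never hit the network: datasets.mldata.urlopen is
# temporarily replaced with mock_mldata_urlopen, which serves fake data sets, and
# the original is restored in a finally block.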
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
all-umass/graphs | graphs/construction/saffron.py | 1 | 4509 | from __future__ import absolute_import, print_function
import numpy as np
import scipy.sparse as ss
import warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import normalize
from graphs import Graph
from ..mini_six import range
from .neighbors import neighbor_graph
__all__ = ['saffron']
def saffron(X, q=32, k=4, tangent_dim=1, curv_thresh=0.95, decay_rate=0.9,
max_iter=15, verbose=False):
'''
SAFFRON graph construction method.
X : (n,d)-array of coordinates
q : int, median number of candidate friends per vertex
k : int, number of friends to select per vertex, k < q
tangent_dim : int, dimensionality of manifold tangent space
curv_thresh : float, tolerance to curvature, lambda in the paper
decay_rate : float, controls step size per iteration, between 0 and 1
max_iter : int, cap on number of iterations
verbose : bool, print goodness measure per iteration when True
From "Tangent Space Guided Intelligent Neighbor Finding",
by Gashler & Martinez, 2011.
See http://axon.cs.byu.edu/papers/gashler2011ijcnn1.pdf
'''
n = len(X)
dist = pairwise_distances(X)
idx = np.argpartition(dist, q)[:, q]
# radius for finding candidate friends: median distance to qth neighbor
r = np.median(dist[np.arange(n), idx])
# make candidate graph + weights
W = neighbor_graph(dist, precomputed=True, epsilon=r).matrix('csr')
# NOTE: this differs from the paper, where W.data[:] = 1 initially
W.data[:] = 1 / W.data
# row normalize
normalize(W, norm='l1', axis=1, copy=False)
# XXX: hacky densify
W = W.toarray()
# iterate to learn optimal weights
prev_goodness = 1e-12
for it in range(max_iter):
goodness = 0
S = _estimate_tangent_spaces(X, W, tangent_dim)
# find aligned candidates
for i, row in enumerate(W):
nbrs = row.nonzero()[-1]
# compute alignment scores
edges = X[nbrs] - X[i]
edge_norms = (edges**2).sum(axis=1)
a1 = (edges.dot(S[i])**2).sum(axis=1) / edge_norms
a2 = (np.einsum('ij,ijk->ik', edges, S[nbrs])**2).sum(axis=1) / edge_norms
a3 = _principal_angle(S[i], S[nbrs]) ** 2
x = (np.minimum(curv_thresh, a1) *
np.minimum(curv_thresh, a2) *
np.minimum(curv_thresh, a3))
# decay weight of least-aligned candidates
excess = x.shape[0] - k
if excess > 0:
bad_idx = np.argpartition(x, excess-1)[:excess]
W[i, nbrs[bad_idx]] *= decay_rate
W[i] /= W[i].sum()
# update goodness measure (weighted alignment)
goodness += x.dot(W[i,nbrs])
if verbose: # pragma: no cover
goodness /= n
print(it, goodness, goodness / prev_goodness)
if goodness / prev_goodness <= 1.0001:
break
prev_goodness = goodness
else:
warnings.warn('Failed to converge after %d iterations.' % max_iter)
# use the largest k weights for each row of W, weighted by original distance
indptr, indices, data = [0], [], []
for i, row in enumerate(W):
nbrs = row.nonzero()[-1]
if len(nbrs) > k:
nbrs = nbrs[np.argpartition(row[nbrs], len(nbrs)-k)[-k:]]
indices.extend(nbrs)
indptr.append(len(nbrs))
data.extend(dist[i, nbrs])
indptr = np.cumsum(indptr)
data = np.array(data)
indices = np.array(indices)
W = ss.csr_matrix((data, indices, indptr), shape=W.shape)
return Graph.from_adj_matrix(W)
def _estimate_tangent_spaces(X, W, dim):
# compute many PCAs in batch
covs = np.empty(X.shape + (X.shape[1],))
for i, row in enumerate(W):
nbrs = row.nonzero()[-1]
xx = X[nbrs] * row[nbrs,None] # weight samples by W
xx -= xx.mean(axis=0)
covs[i] = xx.T.dot(xx)
# compute all the PCs at once
_, vecs = np.linalg.eigh(covs)
return vecs[:,:,-dim:]
def _principal_angle(a, B):
'''a is (d,t), B is (k,d,t)'''
# TODO: check case for t = d-1
if a.shape[1] == 1:
return a.T.dot(B)[0,:,0]
# find normals that maximize distance when projected
x1 = np.einsum('abc,adc->abd', B, B).dot(a) - a # b.dot(b.T).dot(a) - a
x2 = np.einsum('ab,cad->cbd', a.dot(a.T), B) - B # a.dot(a.T).dot(b) - b
xx = np.vstack((x1, x2))
# batch PCA (1st comp. only)
xx -= xx.mean(axis=1)[:,None]
c = np.einsum('abc,abd->acd', xx, xx)
_, vecs = np.linalg.eigh(c)
fpc = vecs[:,:,-1]
fpc1 = fpc[:len(x1)]
fpc2 = fpc[len(x1):]
# a.dot(fpc1).dot(b.dot(fpc2))
lhs = a.dot(fpc1.T).T
rhs = np.einsum('abc,ac->ab', B, fpc2)
return np.einsum('ij,ij->i', lhs, rhs)
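# Minimal usage sketch (illustrative only, not part of the published module):
# builds a SAFFRON graph for a small synthetic helix. Run it as a module, e.g.
# ``python -m graphs.construction.saffron``, since this file uses relative
# imports. The helix data below is a hypothetical stand-in for real coordinates.
if __name__ == '__main__':  # pragma: no cover
    rng = np.random.RandomState(0)
    t = np.linspace(0, 4 * np.pi, 400)
    X_demo = np.column_stack((np.cos(t), np.sin(t), 0.1 * t))
    X_demo += 0.01 * rng.randn(*X_demo.shape)  # small noise off the manifold
    g = saffron(X_demo, q=32, k=4, tangent_dim=1, verbose=False)
    print('kept %d directed edges' % g.matrix('csr').nnz)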
| mit |
rizkiarm/LipNet | evaluation/confusion.py | 1 | 4493 | import nltk
import sys
import string
import os
import numpy as np
from sklearn import metrics
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
# print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
arpabet = nltk.corpus.cmudict.dict()
def get_phonemes(word):
return [str(phoneme).translate(None, string.digits) for phoneme in arpabet[word][0]]
with open('phonemes.txt') as f:
labels = f.read().splitlines()
V1 = labels[0:7]
V2 = labels[7:10]
V3 = labels[10:14]
V4 = labels[14:17]
A = labels[17:21]
B = labels[21:23]
C = labels[23:27]
D = labels[27:31]
E = labels[31:34]
F = labels[34:36]
G = labels[36:38]
H = labels[38:42]
SCENARIOS = [
('Phonemes', labels),
('Lip-rounding based vowels', V1+V2+V3+V4),
('Alveolar-semivowels', A),
('Alveolar-fricatives', B),
('Alveolar', C),
('Palato-alveolar', D),
('Bilabial', E),
('Dental', F),
('Labio-dental', G),
('Velar', H)
]
def get_viseme(word):
phonemes = get_phonemes(word)
visemes = []
for phoneme in phonemes:
if phoneme in V1+V2+V3+V4:
visemes.append('V')
elif phoneme in A:
visemes.append('A')
elif phoneme in B:
visemes.append('B')
elif phoneme in C:
visemes.append('C')
elif phoneme in D:
visemes.append('D')
elif phoneme in E:
visemes.append('E')
elif phoneme in F:
visemes.append('F')
elif phoneme in G:
visemes.append('G')
elif phoneme in H:
visemes.append('H')
return visemes
def get_confusion_matrix(y_true, y_pred, labels, func):
# confusion_matrix = np.identity(len(labels))
confusion_matrix = np.zeros((len(labels),len(labels)))
for i in range(0,len(y_true)):
words_true = y_true[i].split(" ")
words_pred = y_pred[i].split(" ")
for j in range(0, len(words_true)):
phonemes_true = func(words_true[j])
phonemes_pred = func(words_pred[j])
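            # truncate both sequences to their common length so the
            # per-position confusion counts stay aligned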
max_length = min(len(phonemes_true),len(phonemes_pred))
phonemes_true = phonemes_true[:max_length]
phonemes_pred = phonemes_pred[:max_length]
try:
confusion_matrix = np.add(
confusion_matrix,
metrics.confusion_matrix(phonemes_true, phonemes_pred, labels=labels)
)
except:
continue
return confusion_matrix
y_true_path = sys.argv[1]
y_pred_path = sys.argv[2]
with open(y_true_path) as f:
y_true_r = f.read().splitlines()
with open(y_pred_path) as f:
y_pred_r = f.read().splitlines()
y_true = []
y_pred = []
for i in range(0,len(y_true_r)):
if y_true_r[i] in y_true:
continue
y_true.append(y_true_r[i])
y_pred.append(y_pred_r[i])
for k in range(0, len(SCENARIOS)):
_name = SCENARIOS[k][0]
_labels = SCENARIOS[k][1]
confusion_matrix = get_confusion_matrix(y_true,y_pred,_labels,get_phonemes)
plt.figure()
plot_confusion_matrix(confusion_matrix, classes=_labels, normalize=True,
title=_name)
# plt.show()
savepath = os.path.join('confusions', _name + '.png')
	print(savepath)
plt.savefig(savepath, bbox_inches='tight')
# INTRA-VISEMES
viseme_name = 'Intra-visemes'
viseme_labels = ['V', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
confusion_matrix = get_confusion_matrix(y_true,y_pred,viseme_labels,get_viseme)
plt.figure()
plot_confusion_matrix(confusion_matrix, classes=viseme_labels, normalize=True,
title=viseme_name)
# plt.show()
savepath = os.path.join('confusions', viseme_name + '.png')
print(savepath)
plt.savefig(savepath, bbox_inches='tight') | mit |
lcharleux/abapy | doc/conf.py | 1 | 7511 | # -*- coding: utf-8 -*-
#
# abapy documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 31 15:44:59 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.pngmath',
#'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'ipython_directive',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'ipython_console_highlighting',
'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'abapy'
copyright = u'2012, Ludovic Charleux'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.3'
# The full version, including alpha/beta/rc tags.
release = '2.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'stickysidebar': True, 'sidebarwidth': 250}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'abapydoc'
html_copy_source = True
html_show_sourcelink = True
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'abapy.tex', u'abapy Documentation',
u'Ludovic Charleux', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'abapy', u'abapy Documentation',
[u'Ludovic Charleux'], 1)
]
| gpl-2.0 |
pianomania/scikit-learn | sklearn/linear_model/passive_aggressive.py | 28 | 11542 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None, average=False):
super(PassiveAggressiveClassifier, self).__init__(
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
average=average,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False,
average=False):
super(PassiveAggressiveRegressor, self).__init__(
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start,
average=average)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
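if __name__ == '__main__':  # pragma: no cover
    # Minimal usage sketch (illustrative only, not part of scikit-learn):
    # fit both passive-aggressive estimators on small synthetic problems.
    from sklearn.datasets import make_classification, make_regression
    X_c, y_c = make_classification(n_samples=200, random_state=0)
    clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", random_state=0)
    clf.fit(X_c, y_c)
    print("classification accuracy: %.3f" % clf.score(X_c, y_c))
    X_r, y_r = make_regression(n_samples=200, n_features=20, random_state=0)
    reg = PassiveAggressiveRegressor(C=1.0, loss="epsilon_insensitive",
                                     random_state=0)
    reg.fit(X_r, y_r)
    print("regression R^2: %.3f" % reg.score(X_r, y_r))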
| bsd-3-clause |
awni/tensorflow | tensorflow/examples/skflow/iris_val_based_early_stopping.py | 2 | 2221 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
from tensorflow.contrib import skflow
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
test_size=0.2, random_state=42)
val_monitor = skflow.monitors.ValidationMonitor(X_val, y_val,
early_stopping_rounds=200,
n_classes=3)
# classifier with early stopping on training data
classifier1 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier1.fit(X_train, y_train)
score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))
# classifier with early stopping on validation data
classifier2 = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier2.fit(X_train, y_train, val_monitor)
score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))
# in many applications, the score is improved by using early stopping on val data
print(score2 > score1)
| apache-2.0 |
aminert/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
FrancoisRheaultUS/dipy | doc/examples/reconst_dki.py | 3 | 17441 | """
=====================================================================
Reconstruction of the diffusion signal with the kurtosis tensor model
=====================================================================
The diffusion kurtosis model is an expansion of the diffusion tensor model
(see :ref:`example_reconst_dti`). In addition to the diffusion tensor (DT), the
diffusion kurtosis model quantifies the degree to which water diffusion in
biological tissues is non-Gaussian using the kurtosis tensor (KT)
[Jensen2005]_.
Measurements of non-Gaussian diffusion from the diffusion kurtosis model are of
interest because they can be used to characterize tissue microstructural
heterogeneity [Jensen2010]_. Moreover, DKI can be used to: 1) derive concrete
biophysical parameters, such as the density of axonal fibers and diffusion
tortuosity [Fierem2011]_ (see :ref:`example_reconst_dki_micro`); and 2)
resolve crossing fibers in tractography and to obtain invariant rotational
measures not limited to well-aligned fiber populations [NetoHe2015]_.
The diffusion kurtosis model expresses the diffusion-weighted signal as:
.. math::
S(n,b)=S_{0}e^{-bD(n)+\frac{1}{6}b^{2}D(n)^{2}K(n)}
where $\mathbf{b}$ is the applied diffusion weighting (which is dependent on
the measurement parameters), $S_0$ is the signal in the absence of diffusion
gradient sensitization, $\mathbf{D(n)}$ is the value of diffusion along
direction $\mathbf{n}$, and $\mathbf{K(n)}$ is the value of kurtosis along
direction $\mathbf{n}$. The directional diffusion $\mathbf{D(n)}$ and kurtosis
$\mathbf{K(n)}$ can be related to the diffusion tensor (DT) and kurtosis tensor
(KT) using the following equations:
.. math::
D(n)=\sum_{i=1}^{3}\sum_{j=1}^{3}n_{i}n_{j}D_{ij}
and
.. math::
K(n)=\frac{MD^{2}}{D(n)^{2}}\sum_{i=1}^{3}\sum_{j=1}^{3}\sum_{k=1}^{3}
\sum_{l=1}^{3}n_{i}n_{j}n_{k}n_{l}W_{ijkl}
where $D_{ij}$ are the elements of the second-order DT, and $W_{ijkl}$ the
elements of the fourth-order KT and $MD$ is the mean diffusivity. Like the DT,
the KT has antipodal symmetry, and thus only 15 $W_{ijkl}$ elements are needed
to fully characterize the KT:
.. math::
\begin{matrix} ( & W_{xxxx} & W_{yyyy} & W_{zzzz} & W_{xxxy} & W_{xxxz}
& ... \\
& W_{xyyy} & W_{yyyz} & W_{xzzz} & W_{yzzz} & W_{xxyy}
& ... \\
& W_{xxzz} & W_{yyzz} & W_{xxyz} & W_{xyyz} & W_{xyzz}
& & )\end{matrix}
In the following example we show how to fit the diffusion kurtosis model on
diffusion-weighted multi-shell datasets and how to estimate diffusion kurtosis
based statistics.
First, we import all relevant modules:
"""
import numpy as np
import matplotlib.pyplot as plt
import dipy.reconst.dki as dki
import dipy.reconst.dti as dti
from dipy.core.gradients import gradient_table
from dipy.data import get_fnames
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti
from dipy.segment.mask import median_otsu
from scipy.ndimage.filters import gaussian_filter
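"""
As a brief aside (not part of the original tutorial), the directional
diffusion $D(n)$ and kurtosis $K(n)$ defined above are plain tensor
contractions and can be evaluated directly with ``numpy.einsum``. The
diffusion tensor ``D_demo``, kurtosis tensor ``W_demo`` and direction
``n_demo`` below are arbitrary illustrative values, not data used elsewhere
in this example.
"""
D_demo = np.diag([1.7e-3, 0.4e-3, 0.4e-3])   # hypothetical DT (mm^2/s)
W_demo = np.zeros((3, 3, 3, 3))              # hypothetical KT
W_demo[0, 0, 0, 0] = 1.0                     # some kurtosis along x only
n_demo = np.array([1.0, 0.0, 0.0])           # sampling direction
MD_demo = np.trace(D_demo) / 3.0
D_n = np.einsum('i,j,ij->', n_demo, n_demo, D_demo)
K_n = (MD_demo ** 2 / D_n ** 2) * np.einsum('i,j,k,l,ijkl->', n_demo, n_demo,
                                            n_demo, n_demo, W_demo)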
"""
DKI requires multi-shell data, i.e. data acquired from more than one non-zero
b-value. Here, we use fetch to download a multi-shell dataset which was kindly
provided by Hansen and Jespersen (more details about the data are provided in
their paper [Hansen2016]_). The total size of the downloaded data is 192
MBytes, however you only need to fetch it once.
"""
fraw, fbval, fbvec, t1_fname = get_fnames('cfin_multib')
data, affine = load_nifti(fraw)
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
gtab = gradient_table(bvals, bvecs)
"""
Function ``get_fnames`` downloads and outputs the paths of the data,
``load_nifti`` returns the image data array together with its affine, and
``read_bvals_bvecs`` loads the arrays containing the information about the
b-values and b-vectors. These latter arrays are converted to the GradientTable
object required for Dipy_'s data reconstruction.
Before fitting the data, we perform some data pre-processing. We first compute
a brain mask to avoid unnecessary calculations on the background of the image.
"""
maskdata, mask = median_otsu(data, vol_idx=[0, 1], median_radius=4, numpass=2,
autocrop=False, dilate=1)
"""
Since the diffusion kurtosis model involves the estimation of a large number
of parameters [TaxCMW2015]_ and since the non-Gaussian components of the
diffusion signal are more sensitive to artefacts [NetoHe2012]_, it might be
favorable to suppress the effects of noise and artefacts before diffusion
kurtosis fitting. In this example the effects of noise and artefacts are
suppressed by using 3D Gaussian smoothing (with a Gaussian kernel with
fwhm=1.25) as suggested by pioneering DKI studies (e.g. [Jensen2005]_,
[NetoHe2012]_). Although here the Gaussian smoothing is used so that results
are comparable to these studies, it is important to note that more advanced
noise and artifact suppression algorithms are available in DIPY_, e.g. the
Marcenko-Pastur PCA denoising algorithm (:ref:`example-denoise-mppca`) and
the Gibbs artefact suppression algorithm (:ref:`example-denoise-gibbs`).
"""
fwhm = 1.25
gauss_std = fwhm / np.sqrt(8 * np.log(2)) # converting fwhm to Gaussian std
data_smooth = np.zeros(data.shape)
for v in range(data.shape[-1]):
data_smooth[..., v] = gaussian_filter(data[..., v], sigma=gauss_std)
"""
Now that we have loaded and pre-processed the data we can go forward
with DKI fitting. For this, the DKI model is first defined for the data's
GradientTable object by instantiating the DiffusionKurtosisModel object in the
following way:
"""
dkimodel = dki.DiffusionKurtosisModel(gtab)
"""
To fit the data using the defined model object, we call the ``fit`` function of
this object:
"""
dkifit = dkimodel.fit(data_smooth, mask=mask)
"""
The fit method creates a DiffusionKurtosisFit object, which contains all the
diffusion and kurtosis fitting parameters and other DKI attributes. For
instance, since the diffusion kurtosis model estimates the diffusion tensor,
all diffusion standard tensor statistics can be computed from the
DiffusionKurtosisFit instance. For example, we show below how to extract the
fractional anisotropy (FA), the mean diffusivity (MD), the axial diffusivity
(AD) and the radial diffusivity (RD) from the DiffusionKurtosisiFit instance.
"""
FA = dkifit.fa
MD = dkifit.md
AD = dkifit.ad
RD = dkifit.rd
"""
Note that these four standard measures could also be computed from DIPY's DTI
module. Computing these measures from both models should be analogous; however,
theoretically, the diffusion statistics from the kurtosis model are expected to
have better accuracy, since DKI's diffusion tensor is decoupled from
higher-order term effects [Veraar2011]_, [NetoHe2012]_. For comparison purposes,
we calculate below the FA, MD, AD, and RD using DIPY's ``TensorModel``.
"""
tenmodel = dti.TensorModel(gtab)
tenfit = tenmodel.fit(data_smooth, mask=mask)
dti_FA = tenfit.fa
dti_MD = tenfit.md
dti_AD = tenfit.ad
dti_RD = tenfit.rd
"""
The DT-based measures can be easily visualized using matplotlib. For example,
the FA, MD, AD, and RD obtained from the diffusion kurtosis model (upper
panels) and the tensor model (lower panels) are plotted for a selected axial
slice. DTI's diffusion estimates present lower values than DKI's estimates,
indicating that DTI's diffusion measurements are underestimated because of
unaccounted higher-order effects.
"""
axial_slice = 9
fig1, ax = plt.subplots(2, 4, figsize=(12, 6),
subplot_kw={'xticks': [], 'yticks': []})
fig1.subplots_adjust(hspace=0.3, wspace=0.05)
ax.flat[0].imshow(FA[:, :, axial_slice].T, cmap='gray',
vmin=0, vmax=0.7, origin='lower')
ax.flat[0].set_title('FA (DKI)')
ax.flat[1].imshow(MD[:, :, axial_slice].T, cmap='gray',
vmin=0, vmax=2.0e-3, origin='lower')
ax.flat[1].set_title('MD (DKI)')
ax.flat[2].imshow(AD[:, :, axial_slice].T, cmap='gray',
vmin=0, vmax=2.0e-3, origin='lower')
ax.flat[2].set_title('AD (DKI)')
ax.flat[3].imshow(RD[:, :, axial_slice].T, cmap='gray',
vmin=0, vmax=2.0e-3, origin='lower')
ax.flat[3].set_title('RD (DKI)')
ax.flat[4].imshow(dti_FA[:, :, axial_slice].T, cmap='gray',
vmin=0, vmax=0.7, origin='lower')
ax.flat[4].set_title('FA (DTI)')
ax.flat[5].imshow(dti_MD[:, :, axial_slice].T, cmap='gray',
vmin=0, vmax=2.0e-3, origin='lower')
ax.flat[5].set_title('MD (DTI)')
ax.flat[6].imshow(dti_AD[:, :, axial_slice].T, cmap='gray',
vmin=0, vmax=2.0e-3, origin='lower')
ax.flat[6].set_title('AD (DTI)')
ax.flat[7].imshow(dti_RD[:, :, axial_slice].T, cmap='gray',
vmin=0, vmax=2.0e-3, origin='lower')
ax.flat[7].set_title('RD (DTI)')
plt.show()
fig1.savefig('Diffusion_tensor_measures_from_DTI_and_DKI.png')
"""
.. figure:: Diffusion_tensor_measures_from_DTI_and_DKI.png
:align: center
Diffusion tensor measures obtained from the diffusion tensor estimated
from DKI (upper panels) and DTI (lower panels).
In addition to the standard diffusion statistics, the DiffusionKurtosisFit
instance can be used to estimate the non-Gaussian measures of mean kurtosis
(MK), the axial kurtosis (AK) and the radial kurtosis (RK).
Kurtosis measures are susceptible to high-amplitude outliers. The impact of
high-amplitude kurtosis outliers can be reduced by passing, as an optional
input, the extremes of the typical kurtosis values. Here these are assumed to
lie in the range between 0 and 3:
"""
MK = dkifit.mk(0, 3)
AK = dkifit.ak(0, 3)
RK = dkifit.rk(0, 3)
"""
Now we are ready to plot the kurtosis standard measures using matplotlib:
"""
fig2, ax = plt.subplots(1, 3, figsize=(12, 6),
subplot_kw={'xticks': [], 'yticks': []})
fig2.subplots_adjust(hspace=0.3, wspace=0.05)
ax.flat[0].imshow(MK[:, :, axial_slice].T, cmap='gray', vmin=0, vmax=1.5,
origin='lower')
ax.flat[0].set_title('MK')
ax.flat[0].annotate('', fontsize=12, xy=(57, 30),
color='red',
xycoords='data', xytext=(30, 0),
textcoords='offset points',
arrowprops=dict(arrowstyle="->",
color='red'))
ax.flat[1].imshow(AK[:, :, axial_slice].T, cmap='gray', vmin=0, vmax=1.5,
origin='lower')
ax.flat[1].set_title('AK')
ax.flat[2].imshow(RK[:, :, axial_slice].T, cmap='gray', vmin=0, vmax=1.5,
origin='lower')
ax.flat[2].set_title('RK')
ax.flat[2].annotate('', fontsize=12, xy=(57, 30),
color='red',
xycoords='data', xytext=(30, 0),
textcoords='offset points',
arrowprops=dict(arrowstyle="->",
color='red'))
plt.show()
fig2.savefig('Kurtosis_tensor_standard_measures.png')
"""
.. figure:: Kurtosis_tensor_standard_measures.png
:align: center
DKI standard kurtosis measures.
The non-Gaussian behaviour of the diffusion signal is expected to be higher
when tissue water is confined by multiple compartments. MK is, therefore,
higher in white matter since it is highly compartmentalized by myelin sheaths.
This compartmentalization of water diffusion is expected to be more pronounced
perpendicularly to white matter fibers, and thus the RK map presents higher
amplitudes than the AK map.
It is important to note that kurtosis estimates might present negative
values in deep white matter regions (e.g. the red arrow added in the figure
above). These negative kurtosis values are artefactual and might be induced by:
1) low radial diffusivities of aligned white matter - since it is very hard
to capture non-Gaussian information in the radial direction due to its low
diffusion decay, radial kurtosis estimates (and consequently the mean
kurtosis estimates) might have low robustness and a tendency to exhibit
negative values [NetoHe2012]_;
2) Gibbs artefacts - MRI images might be corrupted by signal oscillation
artefacts near tissue edges if an inadequate number of high k-space
frequencies is sampled. These oscillations might have different signs in
images acquired with different diffusion weightings, inducing negative biases
in kurtosis parametric maps [Perron2015]_, [NetoHe2018]_.
One can try to suppress this issue by using the more advanced noise and
artefact suppression algorithms mentioned above, e.g. the MP-PCA denoising
(:ref:`example-denoise-mppca`) and Gibbs unringing
(:ref:`example-denoise-gibbs`) algorithms. Alternatively, one can overcome this
artefact by computing the kurtosis values from powder-averaged
diffusion-weighted signals. The details of how to compute kurtosis from
powder-averaged signals in DIPY are described in the following
tutorial (:ref:`example-reconst-msdki`).
## Mean kurtosis tensor and kurtosis fractional anisotropy
As pointed out by previous studies [Hansen2013]_, axial, radial and mean
kurtosis depend on information from both the diffusion and the kurtosis
tensor. DKI measures that only depend on the kurtosis tensor include the mean
of the kurtosis tensor [Hansen2013]_ and the kurtosis fractional anisotropy
[GlennR2015]_. These measures are computed and illustrated below:
"""
MKT = dkifit.mkt(0, 3)
KFA = dkifit.kfa
fig3, ax = plt.subplots(1, 2, figsize=(10, 6),
subplot_kw={'xticks': [], 'yticks': []})
fig3.subplots_adjust(hspace=0.3, wspace=0.05)
ax.flat[0].imshow(MKT[:, :, axial_slice].T, cmap='gray', vmin=0, vmax=1.5,
origin='lower')
ax.flat[0].set_title('MKT')
ax.flat[1].imshow(KFA[:, :, axial_slice].T, cmap='gray', vmin=0, vmax=1,
origin='lower')
ax.flat[1].set_title('KFA')
plt.show()
fig3.savefig('Measures_from_kurtosis_tensor_only.png')
"""
.. figure:: Measures_from_kurtosis_tensor_only.png
:align: center
DKI measures obtained from the kurtosis tensor only.
As reported by [Hansen2013]_, the mean of the kurtosis tensor (MKT) produces
maps similar to the standard mean kurtosis (MK). On the other hand, the
kurtosis fractional anisotropy (KFA) map shows that the kurtosis tensor has
different degrees of anisotropy than the FA measure from the diffusion
tensor.
References
----------
.. [Jensen2005] Jensen JH, Helpern JA, Ramani A, Lu H, Kaczynski K (2005).
Diffusional Kurtosis Imaging: The Quantification of
                Non-Gaussian Water Diffusion by Means of Magnetic Resonance
Imaging. Magnetic Resonance in Medicine 53: 1432-1440
.. [Jensen2010] Jensen JH, Helpern JA (2010). MRI quantification of
non-Gaussian water diffusion by kurtosis analysis. NMR in
Biomedicine 23(7): 698-710
.. [Fierem2011] Fieremans E, Jensen JH, Helpern JA (2011). White matter
characterization with diffusion kurtosis imaging. NeuroImage
58: 177-188
.. [Veraar2011] Veraart J, Poot DH, Van Hecke W, Blockx I, Van der Linden A,
Verhoye M, Sijbers J (2011). More Accurate Estimation of
Diffusion Tensor Parameters Using Diffusion Kurtosis Imaging.
Magnetic Resonance in Medicine 65(1): 138-145
.. [NetoHe2012] Neto Henriques R, Ferreira H, Correia M, (2012). Diffusion
kurtosis imaging of the healthy human brain. Master
Dissertation Bachelor and Master Programin Biomedical
Engineering and Biophysics, Faculty of Sciences.
http://repositorio.ul.pt/bitstream/10451/8511/1/ulfc104137_tm_Rafael_Henriques.pdf
.. [Hansen2013] Hansen B, Lund TE, Sangill R, and Jespersen SN (2013).
                Experimentally and computationally fast method for estimation
                of a mean kurtosis. Magnetic Resonance in Medicine 69,
                1754–1760. doi:10.1002/mrm.24743
.. [GlennR2015] Glenn GR, Helpern JA, Tabesh A, Jensen JH (2015).
                Quantitative assessment of diffusional kurtosis anisotropy.
                NMR in Biomedicine 28, 448–459. doi:10.1002/nbm.3271
.. [NetoHe2015] Neto Henriques R, Correia MM, Nunes RG, Ferreira HA (2015).
Exploring the 3D geometry of the diffusion kurtosis tensor -
Impact on the development of robust tractography procedures and
novel biomarkers, NeuroImage 111: 85-99
.. [Perron2015] Perrone D, Aelterman J, Pižurica A, Jeurissen B, Philips W,
Leemans A, (2015). The effect of Gibbs ringing artifacts on
measures derived from diffusion MRI. Neuroimage 120, 441-455.
https://doi.org/10.1016/j.neuroimage.2015.06.068.
.. [TaxCMW2015] Tax CMW, Otte WM, Viergever MA, Dijkhuizen RM, Leemans A
(2014). REKINDLE: Robust extraction of kurtosis INDices with
linear estimation. Magnetic Resonance in Medicine 73(2):
794-808.
.. [Hansen2016] Hansen, B, Jespersen, SN (2016). Data for evaluation of fast
kurtosis strategies, b-value optimization and exploration of
diffusion MRI contrast. Scientific Data 3: 160072
doi:10.1038/sdata.2016.72
.. [NetoHe2018] Neto Henriques R (2018). Advanced Methods for Diffusion MRI
Data Analysis and their Application to the Healthy Ageing Brain
(Doctoral thesis). https://doi.org/10.17863/CAM.29356
.. include:: ../links_names.inc
"""
| bsd-3-clause |
dscrobonia/sawyer | analyzers/response_size_centroidmedian.py | 1 | 3343 | import json
import logging
import matplotlib.pyplot as plt
import numpy as np
import scipy.cluster.hierarchy as hac
log = logging.getLogger(__name__)
def analyze(data):
# Convert this to python data for us to be able to run ML algorithms
json_to_python = json.loads(data)
per_size = dict() # IP-Response size
hostlist = dict()
# Data pre-processing here:
for y in json_to_python:
hostlist[y['HOST']] = 1
if y['HOST'] in per_size:
per_size[y['HOST']].append(int(y['SIZE']))
else:
per_size[y['HOST']] = [int(y['SIZE'])]
##Data pre-processing ends here
log.debug(
"*** Printing Input to analysis - 4 (1): K-means on IP and average response size ****"
)
#####*****SIZE******####
#### Analysis #4 (1): IP address - Size of response received feature
X = np.array([[0.00, 0.00]])
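    # Build one 2-D point per host for clustering: encode the dotted-quad IP as
    # a single number by zero-padding every octet to three digits and
    # concatenating them (e.g. 10.0.2.15 -> 010000002015), scale it down by
    # 1000, and pair it with the host's average response size.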
for x in hostlist:
avg_size = mean(per_size[x])
log.debug(x + ": " + str(avg_size))
y = x.split(".")
ip = ""
for z in range(4):
l = len(y[z])
l = 3 - l
if (l > 0):
zero = ""
for t in range(3 - len(y[z])):
zero = zero + "0"
y[z] = zero + y[z]
ip = ip + y[z]
# log.debug( str(float(float(ip)/1000)) + ": " + str(avg_size))
le = [float(float(ip) / 1000), avg_size]
X = np.vstack([X, le])
log.info(
"******** Printing Analysis #4: IP-Address and Response Size received: Centroid and Median Hierarchical Clustering ********\nCheck 'test-centroid-median.png' for more info!"
)
# print kmeans.labels_
### Analysis 4 (9): ###### CENTROID AND MEDIAN HAC*****#########
fig, axes23 = plt.subplots(2, 3)
for method, axes in zip(['centroid', 'median'], axes23):
z = hac.linkage(X, method=method)
# Plotting
axes[0].plot(range(1, len(z) + 1), z[::-1, 2])
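        # Locate 'knee' candidates in the merge-distance curve: the largest and
        # second-largest second differences suggest plausible cluster counts.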
knee = np.diff(z[::-1, 2], 2)
axes[0].plot(range(2, len(z)), knee)
num_clust1 = knee.argmax() + 2
knee[knee.argmax()] = 0
num_clust2 = knee.argmax() + 2
axes[0].text(num_clust1, z[::-1, 2][num_clust1 - 1],
'possible\n<- knee point')
part1 = hac.fcluster(z, num_clust1, 'maxclust')
part2 = hac.fcluster(z, num_clust2, 'maxclust')
clr = [
'#2200CC', '#D9007E', '#FF6600', '#FFCC00', '#ACE600', '#0099CC',
'#8900CC', '#FF0000', '#FF9900', '#FFFF00', '#00CC01', '#0055CC'
]
for part, ax in zip([part1, part2], axes[1:]):
for cluster in set(part):
ax.scatter(
X[part == cluster, 0],
X[part == cluster, 1],
color=clr[cluster % 10])
m = '\n(method: {})'.format(method)
plt.setp(
axes[0],
title='Screeplot{}'.format(m),
xlabel='partition',
ylabel='{}\ncluster distance'.format(m))
plt.setp(axes[1], title='{} Clusters'.format(num_clust1))
plt.setp(axes[2], title='{} Clusters'.format(num_clust2))
plt.tight_layout()
##plt.show()
plt.savefig('test-centroid-median.png')
def mean(numbers):
return float(sum(numbers)) / max(len(numbers), 1)
| mit |
numenta-ci/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_mixed.py | 70 | 3776 | from matplotlib._image import frombuffer
from matplotlib.backends.backend_agg import RendererAgg
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, width, height, dpi, vector_renderer, raster_renderer_class=None):
"""
width: The width of the canvas in logical units
height: The height of the canvas in logical units
dpi: The dpi of the canvas
vector_renderer: An instance of a subclass of RendererBase
that will be used for the vector drawing.
raster_renderer_class: The renderer class to use for the
raster drawing. If not provided, this will use the Agg
backend (which is currently the only viable option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
assert not vector_renderer.option_image_nocomposite()
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
self._set_current_renderer(vector_renderer)
_methods = """
close_group draw_image draw_markers draw_path
draw_path_collection draw_quad_mesh draw_tex draw_text
finalize flipy get_canvas_width_height get_image_magnification
get_texmanager get_text_width_height_descent new_gc open_group
option_image_nocomposite points_to_pixels strip_math
""".split()
def _set_current_renderer(self, renderer):
self._renderer = renderer
for method in self._methods:
if hasattr(renderer, method):
setattr(self, method, getattr(renderer, method))
renderer.start_rasterizing = self.start_rasterizing
renderer.stop_rasterizing = self.stop_rasterizing
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._set_current_renderer(self._raster_renderer)
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._set_current_renderer(self._vector_renderer)
width, height = self._width * self.dpi, self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = frombuffer(buffer, w, h, True)
image.is_grayscale = False
image.flipud_out()
self._renderer.draw_image(l, height - b - h, image, None)
self._raster_renderer = None
self._rasterizing = False
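    # Hedged usage sketch (editorial addition, not in the upstream module),
    # assuming `renderer` is a MixedModeRenderer already wired to a vector
    # backend: start/stop calls nest via the _rasterizing counter, and only the
    # outermost stop_rasterizing() composites the Agg buffer back into the
    # vector output through draw_image.
    #
    #     renderer.start_rasterizing()   # counter 0 -> 1, Agg renderer created
    #     renderer.start_rasterizing()   # counter 1 -> 2, no new buffer
    #     renderer.stop_rasterizing()    # counter 2 -> 1, nothing composited yet
    #     renderer.stop_rasterizing()    # counter 1 -> 0, raster image drawn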
| agpl-3.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/mpl_toolkits/mplot3d/axes3d.py | 7 | 92718 | #!/usr/bin/python
# axes3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts fixed by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
# Significant updates and revisions by Ben Root <[email protected]>
"""
Module containing Axes3D, an object which can plot 3D objects on a
2D matplotlib figure.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import six
from six.moves import map, xrange, zip, reduce
import warnings
from operator import itemgetter
import matplotlib.axes as maxes
from matplotlib.axes import Axes, rcParams
from matplotlib import cbook
import matplotlib.transforms as mtransforms
from matplotlib.transforms import Bbox
import matplotlib.collections as mcoll
from matplotlib import docstring
import matplotlib.scale as mscale
from matplotlib.tri.triangulation import Triangulation
import numpy as np
from matplotlib.colors import Normalize, colorConverter, LightSource
from . import art3d
from . import proj3d
from . import axis3d
def unit_bbox():
box = Bbox(np.array([[0, 0], [1, 1]]))
return box
class Axes3D(Axes):
"""
3D axes object.
"""
name = '3d'
_shared_z_axes = cbook.Grouper()
def __init__(self, fig, rect=None, *args, **kwargs):
'''
Build an :class:`Axes3D` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*azim* Azimuthal viewing angle (default -60)
*elev* Elevation viewing angle (default 30)
*zscale* [%(scale)s]
*sharez* Other axes to share z-limits with
================ =========================================
.. versionadded :: 1.2.1
*sharez*
''' % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
if rect is None:
rect = [0.0, 0.0, 1.0, 1.0]
self._cids = []
self.initial_azim = kwargs.pop('azim', -60)
self.initial_elev = kwargs.pop('elev', 30)
zscale = kwargs.pop('zscale', None)
sharez = kwargs.pop('sharez', None)
self.xy_viewLim = unit_bbox()
self.zz_viewLim = unit_bbox()
self.xy_dataLim = unit_bbox()
self.zz_dataLim = unit_bbox()
        # inhibit autoscale_view until the axes are defined
# they can't be defined until Axes.__init__ has been called
self.view_init(self.initial_elev, self.initial_azim)
self._ready = 0
self._sharez = sharez
if sharez is not None:
self._shared_z_axes.join(self, sharez)
self._adjustable = 'datalim'
Axes.__init__(self, fig, rect,
frameon=True,
*args, **kwargs)
# Disable drawing of axes by base class
Axes.set_axis_off(self)
# Enable drawing of axes by Axes3D class
self.set_axis_on()
self.M = None
# func used to format z -- fall back on major formatters
self.fmt_zdata = None
if zscale is not None :
self.set_zscale(zscale)
if self.zaxis is not None :
self._zcid = self.zaxis.callbacks.connect('units finalize',
self.relim)
else :
self._zcid = None
self._ready = 1
self.mouse_init()
self.set_top_view()
self.axesPatch.set_linewidth(0)
# Calculate the pseudo-data width and height
pseudo_bbox = self.transLimits.inverted().transform([(0, 0), (1, 1)])
self._pseudo_w, self._pseudo_h = pseudo_bbox[1] - pseudo_bbox[0]
self.figure.add_axes(self)
def set_axis_off(self):
self._axis3don = False
def set_axis_on(self):
self._axis3don = True
def have_units(self):
"""
Return *True* if units are set on the *x*, *y*, or *z* axes
"""
return (self.xaxis.have_units() or self.yaxis.have_units() or
self.zaxis.have_units())
def convert_zunits(self, z):
"""
For artists in an axes, if the zaxis has units support,
convert *z* using zaxis unit type
.. versionadded :: 1.2.1
"""
return self.zaxis.convert_units(z)
def _process_unit_info(self, xdata=None, ydata=None, zdata=None,
kwargs=None):
"""
Look for unit *kwargs* and update the axis instances as necessary
"""
Axes._process_unit_info(self, xdata=xdata, ydata=ydata, kwargs=kwargs)
if self.xaxis is None or self.yaxis is None or self.zaxis is None:
return
if zdata is not None:
# we only need to update if there is nothing set yet.
if not self.zaxis.have_units():
self.zaxis.update_units(xdata)
# process kwargs 2nd since these will override default units
if kwargs is not None:
zunits = kwargs.pop('zunits', self.zaxis.units)
if zunits != self.zaxis.units:
self.zaxis.set_units(zunits)
# If the units being set imply a different converter,
# we need to update.
if zdata is not None:
self.zaxis.update_units(zdata)
def set_top_view(self):
# this happens to be the right view for the viewing coordinates
# moved up and to the left slightly to fit labels and axes
xdwl = (0.95/self.dist)
xdw = (0.9/self.dist)
ydwl = (0.95/self.dist)
ydw = (0.9/self.dist)
# This is purposely using the 2D Axes's set_xlim and set_ylim,
# because we are trying to place our viewing pane.
Axes.set_xlim(self, -xdwl, xdw, auto=None)
Axes.set_ylim(self, -ydwl, ydw, auto=None)
def _init_axis(self):
'''Init 3D axes; overrides creation of regular X/Y axes'''
self.w_xaxis = axis3d.XAxis('x', self.xy_viewLim.intervalx,
self.xy_dataLim.intervalx, self)
self.xaxis = self.w_xaxis
self.w_yaxis = axis3d.YAxis('y', self.xy_viewLim.intervaly,
self.xy_dataLim.intervaly, self)
self.yaxis = self.w_yaxis
self.w_zaxis = axis3d.ZAxis('z', self.zz_viewLim.intervalx,
self.zz_dataLim.intervalx, self)
self.zaxis = self.w_zaxis
for ax in self.xaxis, self.yaxis, self.zaxis:
ax.init3d()
def get_children(self):
return [self.zaxis,] + Axes.get_children(self)
def unit_cube(self, vals=None):
minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
xs, ys, zs = ([minx, maxx, maxx, minx, minx, maxx, maxx, minx],
[miny, miny, maxy, maxy, miny, miny, maxy, maxy],
[minz, minz, minz, minz, maxz, maxz, maxz, maxz])
return list(zip(xs, ys, zs))
def tunit_cube(self, vals=None, M=None):
if M is None:
M = self.M
xyzs = self.unit_cube(vals)
tcube = proj3d.proj_points(xyzs, M)
return tcube
def tunit_edges(self, vals=None, M=None):
tc = self.tunit_cube(vals, M)
edges = [(tc[0], tc[1]),
(tc[1], tc[2]),
(tc[2], tc[3]),
(tc[3], tc[0]),
(tc[0], tc[4]),
(tc[1], tc[5]),
(tc[2], tc[6]),
(tc[3], tc[7]),
(tc[4], tc[5]),
(tc[5], tc[6]),
(tc[6], tc[7]),
(tc[7], tc[4])]
return edges
def draw(self, renderer):
# draw the background patch
self.axesPatch.draw(renderer)
self._frameon = False
# add the projection matrix to the renderer
self.M = self.get_proj()
renderer.M = self.M
renderer.vvec = self.vvec
renderer.eye = self.eye
renderer.get_axis_position = self.get_axis_position
# Calculate projection of collections and zorder them
zlist = [(col.do_3d_projection(renderer), col) \
for col in self.collections]
zlist.sort(key=itemgetter(0), reverse=True)
for i, (z, col) in enumerate(zlist):
col.zorder = i
# Calculate projection of patches and zorder them
zlist = [(patch.do_3d_projection(renderer), patch) \
for patch in self.patches]
zlist.sort(key=itemgetter(0), reverse=True)
for i, (z, patch) in enumerate(zlist):
patch.zorder = i
if self._axis3don:
axes = (self.xaxis, self.yaxis, self.zaxis)
# Draw panes first
for ax in axes:
ax.draw_pane(renderer)
# Then axes
for ax in axes:
ax.draw(renderer)
# Then rest
Axes.draw(self, renderer)
def get_axis_position(self):
vals = self.get_w_lims()
tc = self.tunit_cube(vals, self.M)
xhigh = tc[1][2] > tc[2][2]
yhigh = tc[3][2] > tc[2][2]
zhigh = tc[0][2] > tc[2][2]
return xhigh, yhigh, zhigh
def update_datalim(self, xys, **kwargs):
pass
def get_autoscale_on(self) :
"""
Get whether autoscaling is applied for all axes on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return Axes.get_autoscale_on(self) and self.get_autoscalez_on()
def get_autoscalez_on(self) :
"""
Get whether autoscaling for the z-axis is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return self._autoscaleZon
def set_autoscale_on(self, b) :
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
Axes.set_autoscale_on(self, b)
self.set_autoscalez_on(b)
def set_autoscalez_on(self, b) :
"""
Set whether autoscaling for the z-axis is applied on plot commands
accepts: [ *True* | *False* ]
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
        self._autoscaleZon = b
def set_zmargin(self, m) :
"""
Set padding of Z data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if m < 0 or m > 1 :
raise ValueError("margin must be in range 0 to 1")
self._zmargin = m
def margins(self, *args, **kw) :
"""
Convenience method to set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin, zmargin
::
margins(margin)
margins(xmargin, ymargin, zmargin)
margins(x=xmargin, y=ymargin, z=zmargin)
margins(..., tight=False)
All forms above set the xmargin, ymargin and zmargin
parameters. All keyword parameters are optional. A single argument
specifies xmargin, ymargin and zmargin. The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if not args and not kw:
return self._xmargin, self._ymargin, self._zmargin
tight = kw.pop('tight', True)
mx = kw.pop('x', None)
my = kw.pop('y', None)
mz = kw.pop('z', None)
if len(args) == 1:
mx = my = mz = args[0]
elif len(args) == 2:
# Maybe put out a warning because mz is not set?
mx, my = args
elif len(args) == 3:
mx, my, mz = args
else:
raise ValueError("more than three arguments were supplied")
if mx is not None:
self.set_xmargin(mx)
if my is not None:
self.set_ymargin(my)
if mz is not None:
self.set_zmargin(mz)
scalex = (mx is not None)
scaley = (my is not None)
scalez = (mz is not None)
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
scalez=scalez)
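    # Hedged usage sketch (editorial addition, not in the upstream source), on
    # a hypothetical Axes3D instance `ax`:
    #
    #     ax.margins(0.05)                  # 5% padding on x, y and z
    #     ax.margins(x=0.1, y=0.0, z=0.2)   # per-axis margins
    #     xm, ym, zm = ax.margins()         # query the current margins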
def autoscale(self, enable=True, axis='both', tight=None) :
"""
Convenience method for simple axis view autoscaling.
See :meth:`matplotlib.axes.Axes.autoscale` for full explanation.
Note that this function behaves the same, but for all
        three axes. Therefore, 'z' can be passed for *axis*,
and 'both' applies to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if enable is None:
scalex = True
scaley = True
scalez = True
else:
scalex = False
scaley = False
scalez = False
if axis in ['x', 'both']:
self._autoscaleXon = bool(enable)
scalex = self._autoscaleXon
if axis in ['y', 'both']:
self._autoscaleYon = bool(enable)
scaley = self._autoscaleYon
if axis in ['z', 'both']:
self._autoscaleZon = bool(enable)
scalez = self._autoscaleZon
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
scalez=scalez)
def auto_scale_xyz(self, X, Y, Z=None, had_data=None):
x, y, z = list(map(np.asarray, (X, Y, Z)))
try:
x, y = x.flatten(), y.flatten()
if Z is not None:
z = z.flatten()
except AttributeError:
raise
# This updates the bounding boxes as to keep a record as
# to what the minimum sized rectangular volume holds the
# data.
self.xy_dataLim.update_from_data_xy(np.array([x, y]).T, not had_data)
if z is not None:
self.zz_dataLim.update_from_data_xy(np.array([z, z]).T, not had_data)
# Let autoscale_view figure out how to use this data.
self.autoscale_view()
def autoscale_view(self, tight=None, scalex=True, scaley=True,
scalez=True) :
"""
Autoscale the view limits using the data limits.
See :meth:`matplotlib.axes.Axes.autoscale_view` for documentation.
Note that this function applies to the 3D axes, and as such
adds the *scalez* to the function arguments.
.. versionchanged :: 1.1.0
Function signature was changed to better match the 2D version.
*tight* is now explicitly a kwarg and placed first.
.. versionchanged :: 1.2.1
This is now fully functional.
"""
if not self._ready:
return
# This method looks at the rectangular volume (see above)
# of data and decides how to scale the view portal to fit it.
if tight is None:
# if image data only just use the datalim
_tight = self._tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = self.xy_dataLim.intervalx
xlocator = self.xaxis.get_major_locator()
try:
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = self.xy_dataLim.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
if scalez and self._autoscaleZon:
zshared = self._shared_z_axes.get_siblings(self)
dl = [ax.dataLim for ax in zshared]
bb = mtransforms.BboxBase.union(dl)
z0, z1 = self.zz_dataLim.intervalx
zlocator = self.zaxis.get_major_locator()
try:
z0, z1 = zlocator.nonsingular(z0, z1)
except AttributeError:
z0, z1 = mtransforms.nonsingular(z0, z1, increasing=False,
expander=0.05)
if self._zmargin > 0:
delta = (z1 - z0) * self._zmargin
z0 -= delta
z1 += delta
if not _tight:
z0, z1 = zlocator.view_limits(z0, z1)
self.set_zbound(z0, z1)
def get_w_lims(self):
'''Get 3D world limits.'''
minx, maxx = self.get_xlim3d()
miny, maxy = self.get_ylim3d()
minz, maxz = self.get_zlim3d()
return minx, maxx, miny, maxy, minz, maxz
def _determine_lims(self, xmin=None, xmax=None, *args, **kwargs):
if xmax is None and cbook.iterable(xmin):
xmin, xmax = xmin
if xmin == xmax:
xmin -= 0.05
xmax += 0.05
return (xmin, xmax)
def set_xlim3d(self, left=None, right=None, emit=True, auto=False, **kw):
"""
Set 3D x limits.
See :meth:`matplotlib.axes.Axes.set_xlim` for full documentation.
"""
if 'xmin' in kw:
left = kw.pop('xmin')
if 'xmax' in kw:
right = kw.pop('xmax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if right is None and cbook.iterable(left):
left, right = left
self._process_unit_info(xdata=(left, right))
if left is not None:
left = self.convert_xunits(left)
if right is not None:
right = self.convert_xunits(right)
old_left, old_right = self.get_xlim()
if left is None:
left = old_left
if right is None:
right = old_right
if left == right:
warnings.warn(('Attempting to set identical left==right results\n'
'in singular transformations; automatically expanding.\n'
'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.xy_viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.xy_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return left, right
set_xlim = set_xlim3d
def set_ylim3d(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Set 3D y limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation.
"""
if 'ymin' in kw:
bottom = kw.pop('ymin')
if 'ymax' in kw:
top = kw.pop('ymax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and cbook.iterable(bottom):
bottom, top = bottom
self._process_unit_info(ydata=(bottom, top))
if bottom is not None:
bottom = self.convert_yunits(bottom)
if top is not None:
top = self.convert_yunits(top)
old_bottom, old_top = self.get_ylim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.xy_viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.xy_viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return bottom, top
set_ylim = set_ylim3d
def set_zlim3d(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Set 3D z limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation
"""
if 'zmin' in kw:
bottom = kw.pop('zmin')
if 'zmax' in kw:
top = kw.pop('zmax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and cbook.iterable(bottom):
bottom, top = bottom
self._process_unit_info(zdata=(bottom, top))
if bottom is not None:
bottom = self.convert_zunits(bottom)
if top is not None:
top = self.convert_zunits(top)
old_bottom, old_top = self.get_zlim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.zaxis.limit_range_for_scale(bottom, top)
self.zz_viewLim.intervalx = (bottom, top)
if auto is not None:
self._autoscaleZon = bool(auto)
if emit:
self.callbacks.process('zlim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_z_axes.get_siblings(self):
if other is not self:
other.set_zlim(self.zz_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return bottom, top
set_zlim = set_zlim3d
def get_xlim3d(self):
return self.xy_viewLim.intervalx
get_xlim3d.__doc__ = maxes.Axes.get_xlim.__doc__
get_xlim = get_xlim3d
get_xlim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D x-limits
"""
def get_ylim3d(self):
return self.xy_viewLim.intervaly
get_ylim3d.__doc__ = maxes.Axes.get_ylim.__doc__
get_ylim = get_ylim3d
get_ylim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D y-limits.
"""
def get_zlim3d(self):
'''Get 3D z limits.'''
return self.zz_viewLim.intervalx
get_zlim = get_zlim3d
def get_zscale(self) :
"""
Return the zaxis scale string %s
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
""" % (", ".join(mscale.get_scale_names()))
return self.zaxis.get_scale()
# We need to slightly redefine these to pass scalez=False
# to their calls of autoscale_view.
def set_xscale(self, value, **kwargs) :
self.xaxis._set_scale(value, **kwargs)
self.autoscale_view(scaley=False, scalez=False)
self._update_transScale()
set_xscale.__doc__ = maxes.Axes.set_xscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
def set_yscale(self, value, **kwargs) :
self.yaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scalez=False)
self._update_transScale()
set_yscale.__doc__ = maxes.Axes.set_yscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
@docstring.dedent_interpd
def set_zscale(self, value, **kwargs) :
"""
call signature::
set_zscale(value)
Set the scaling of the z-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
.. note ::
            Currently, Axes3D objects only support linear scales.
Other scales may or may not work, and support for these
is improving with each release.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scaley=False)
self._update_transScale()
def set_zticks(self, *args, **kwargs):
"""
Set z-axis tick locations.
See :meth:`matplotlib.axes.Axes.set_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticks(*args, **kwargs)
def get_zticks(self, minor=False):
"""
Return the z ticks as a list of locations
See :meth:`matplotlib.axes.Axes.get_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklocs(minor=minor)
def get_zmajorticklabels(self) :
"""
Get the ztick labels as a list of Text instances
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_majorticklabels())
def get_zminorticklabels(self) :
"""
Get the ztick labels as a list of Text instances
.. note::
Minor ticks are not supported. This function was added
only for completeness.
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_minorticklabels())
def set_zticklabels(self, *args, **kwargs) :
"""
Set z-axis tick labels.
See :meth:`matplotlib.axes.Axes.set_yticklabels` for more details.
.. note::
Minor ticks are not supported by Axes3D objects.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticklabels(*args, **kwargs)
def get_zticklabels(self, minor=False) :
"""
Get ztick labels as a list of Text instances.
See :meth:`matplotlib.axes.Axes.get_yticklabels` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_ticklabels(minor=minor))
def zaxis_date(self, tz=None) :
"""
Sets up z-axis ticks and labels that treat the z data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
.. note::
This function is merely provided for completeness.
Axes3D objects do not officially support dates for ticks,
and so this may or may not work as expected.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis.axis_date(tz)
def get_zticklines(self) :
"""
Get ztick lines as a list of Line2D instances.
Note that this function is provided merely for completeness.
These lines are re-calculated as the display changes.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklines()
def clabel(self, *args, **kwargs):
"""
This function is currently not implemented for 3D axes.
Returns *None*.
"""
return None
def view_init(self, elev=None, azim=None):
"""
Set the elevation and azimuth of the axes.
        This can be used to rotate the axes programmatically.
'elev' stores the elevation angle in the z plane.
'azim' stores the azimuth angle in the x,y plane.
if elev or azim are None (default), then the initial value
is used which was specified in the :class:`Axes3D` constructor.
"""
self.dist = 10
if elev is None:
self.elev = self.initial_elev
else:
self.elev = elev
if azim is None:
self.azim = self.initial_azim
else:
self.azim = azim
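    # Hedged usage sketch (editorial addition, not in the upstream source), on
    # a hypothetical Axes3D instance `ax`:
    #
    #     ax.view_init(elev=20., azim=-35.)   # 20 deg above the x-y plane, -35 deg about z
    #     ax.figure.canvas.draw_idle()        # request a redraw with the new view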
def get_proj(self):
"""
Create the projection matrix from the current viewing position.
elev stores the elevation angle in the z plane
azim stores the azimuth angle in the x,y plane
dist is the distance of the eye viewing point from the object
point.
"""
relev, razim = np.pi * self.elev/180, np.pi * self.azim/180
xmin, xmax = self.get_xlim3d()
ymin, ymax = self.get_ylim3d()
zmin, zmax = self.get_zlim3d()
# transform to uniform world coordinates 0-1.0,0-1.0,0-1.0
worldM = proj3d.world_transformation(xmin, xmax,
ymin, ymax,
zmin, zmax)
# look into the middle of the new coordinates
R = np.array([0.5, 0.5, 0.5])
xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist
yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist
zp = R[2] + np.sin(relev) * self.dist
E = np.array((xp, yp, zp))
self.eye = E
self.vvec = R - E
self.vvec = self.vvec / proj3d.mod(self.vvec)
if abs(relev) > np.pi/2:
# upside down
V = np.array((0, 0, -1))
else:
V = np.array((0, 0, 1))
zfront, zback = -self.dist, self.dist
viewM = proj3d.view_transformation(E, R, V)
perspM = proj3d.persp_transformation(zfront, zback)
M0 = np.dot(viewM, worldM)
M = np.dot(perspM, M0)
return M
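    # Editorial note: the matrix returned above is the composition
    # M = perspM . viewM . worldM, i.e. data coordinates are first scaled into
    # the unit cube, then transformed into eye coordinates, then projected with
    # the perspective transform.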
def mouse_init(self, rotate_btn=1, zoom_btn=3):
"""Initializes mouse button callbacks to enable 3D rotation of
the axes. Also optionally sets the mouse buttons for 3D rotation
and zooming.
============ =======================================================
Argument Description
============ =======================================================
*rotate_btn* The integer or list of integers specifying which mouse
button or buttons to use for 3D rotation of the axes.
Default = 1.
*zoom_btn* The integer or list of integers specifying which mouse
button or buttons to use to zoom the 3D axes.
Default = 3.
============ =======================================================
"""
self.button_pressed = None
canv = self.figure.canvas
if canv is not None:
c1 = canv.mpl_connect('motion_notify_event', self._on_move)
c2 = canv.mpl_connect('button_press_event', self._button_press)
c3 = canv.mpl_connect('button_release_event', self._button_release)
self._cids = [c1, c2, c3]
else:
warnings.warn('Axes3D.figure.canvas is \'None\', mouse rotation disabled. Set canvas then call Axes3D.mouse_init().')
# coerce scalars into array-like, then convert into
# a regular list to avoid comparisons against None
# which breaks in recent versions of numpy.
self._rotate_btn = np.atleast_1d(rotate_btn).tolist()
self._zoom_btn = np.atleast_1d(zoom_btn).tolist()
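    # Hedged usage sketch (editorial addition, not in the upstream source):
    # rebinding the interaction buttons on a hypothetical `ax`, e.g. rotate
    # with button 1 and zoom with either button 2 or 3:
    #
    #     ax.mouse_init(rotate_btn=1, zoom_btn=[2, 3])
    #     ax.disable_mouse_rotation()   # later: disconnect the callbacks again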
def can_zoom(self) :
"""
Return *True* if this axes supports the zoom box button functionality.
3D axes objects do not use the zoom box button.
"""
return False
def can_pan(self) :
"""
Return *True* if this axes supports the pan/zoom button functionality.
3D axes objects do not use the pan/zoom button.
"""
return False
def cla(self):
"""
Clear axes
"""
# Disabling mouse interaction might have been needed a long
# time ago, but I can't find a reason for it now - BVR (2012-03)
#self.disable_mouse_rotation()
self.zaxis.cla()
if self._sharez is not None:
self.zaxis.major = self._sharez.zaxis.major
self.zaxis.minor = self._sharez.zaxis.minor
z0, z1 = self._sharez.get_zlim()
self.set_zlim(z0, z1, emit=False, auto=None)
self.zaxis._set_scale(self._sharez.zaxis.get_scale())
else:
self.zaxis._set_scale('linear')
self._autoscaleZon = True
self._zmargin = 0
Axes.cla(self)
self.grid(rcParams['axes3d.grid'])
def disable_mouse_rotation(self):
"""Disable mouse button callbacks.
"""
# Disconnect the various events we set.
for cid in self._cids:
self.figure.canvas.mpl_disconnect(cid)
self._cids = []
def _button_press(self, event):
if event.inaxes == self:
self.button_pressed = event.button
self.sx, self.sy = event.xdata, event.ydata
def _button_release(self, event):
self.button_pressed = None
def format_zdata(self, z):
"""
Return *z* string formatted. This function will use the
:attr:`fmt_zdata` attribute if it is callable, else will fall
back on the zaxis major formatter
"""
try: return self.fmt_zdata(z)
except (AttributeError, TypeError):
func = self.zaxis.get_major_formatter().format_data_short
val = func(z)
return val
def format_coord(self, xd, yd):
"""
        Given the 2D view coordinates, attempt to guess a 3D coordinate.
Looks for the nearest edge to the point and then assumes that
the point is at the same z location as the nearest point on the edge.
"""
if self.M is None:
return ''
if self.button_pressed in self._rotate_btn:
return 'azimuth=%d deg, elevation=%d deg ' % (self.azim, self.elev)
# ignore xd and yd and display angles instead
p = (xd, yd)
edges = self.tunit_edges()
#lines = [proj3d.line2d(p0,p1) for (p0,p1) in edges]
ldists = [(proj3d.line2d_seg_dist(p0, p1, p), i) for \
i, (p0, p1) in enumerate(edges)]
ldists.sort()
# nearest edge
edgei = ldists[0][1]
p0, p1 = edges[edgei]
# scale the z value to match
x0, y0, z0 = p0
x1, y1, z1 = p1
d0 = np.hypot(x0-xd, y0-yd)
d1 = np.hypot(x1-xd, y1-yd)
dt = d0+d1
z = d1/dt * z0 + d0/dt * z1
x, y, z = proj3d.inv_transform(xd, yd, z, self.M)
xs = self.format_xdata(x)
ys = self.format_ydata(y)
zs = self.format_zdata(z)
return 'x=%s, y=%s, z=%s' % (xs, ys, zs)
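    # Editorial note: the z estimate above blends the two edge endpoints by
    # their 2D distances to the cursor, z = (d1*z0 + d0*z1) / (d0 + d1), so the
    # endpoint that is closer on screen contributes more to the guessed value.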
def _on_move(self, event):
"""Mouse moving
button-1 rotates by default. Can be set explicitly in mouse_init().
button-3 zooms by default. Can be set explicitly in mouse_init().
"""
if not self.button_pressed:
return
if self.M is None:
return
x, y = event.xdata, event.ydata
# In case the mouse is out of bounds.
if x is None:
return
dx, dy = x - self.sx, y - self.sy
w = self._pseudo_w
h = self._pseudo_h
self.sx, self.sy = x, y
# Rotation
if self.button_pressed in self._rotate_btn:
# rotate viewing point
# get the x and y pixel coords
if dx == 0 and dy == 0:
return
self.elev = art3d.norm_angle(self.elev - (dy/h)*180)
self.azim = art3d.norm_angle(self.azim - (dx/w)*180)
self.get_proj()
self.figure.canvas.draw_idle()
# elif self.button_pressed == 2:
# pan view
# project xv,yv,zv -> xw,yw,zw
# pan
# pass
# Zoom
elif self.button_pressed in self._zoom_btn:
# zoom view
# hmmm..this needs some help from clipping....
minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
df = 1-((h - dy)/h)
dx = (maxx-minx)*df
dy = (maxy-miny)*df
dz = (maxz-minz)*df
self.set_xlim3d(minx - dx, maxx + dx)
self.set_ylim3d(miny - dy, maxy + dy)
self.set_zlim3d(minz - dz, maxz + dz)
self.get_proj()
self.figure.canvas.draw_idle()
def set_zlabel(self, zlabel, fontdict=None, labelpad=None, **kwargs):
'''
Set zlabel. See doc for :meth:`set_ylabel` for description.
.. note::
Currently, *labelpad* does not have an effect on the labels.
'''
# FIXME: With a rework of axis3d.py, the labelpad should work again
# At that point, remove the above message in the docs.
if labelpad is not None : self.zaxis.labelpad = labelpad
return self.zaxis.set_label_text(zlabel, fontdict, **kwargs)
def get_zlabel(self) :
"""
Get the z-label text string.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
label = self.zaxis.get_label()
return label.get_text()
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the 3D axes panels are drawn
.. versionadded :: 1.1.0
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the 3D axes panels are drawn
ACCEPTS: [ *True* | *False* ]
.. versionadded :: 1.1.0
"""
self._frameon = bool(b)
def get_axisbelow(self):
"""
Get whether axis below is true or not.
For axes3d objects, this will always be *True*
.. versionadded :: 1.1.0
This function was added for completeness.
"""
return True
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below
most artists
For axes3d objects, this will ignore any settings and just use *True*
ACCEPTS: [ *True* | *False* ]
.. versionadded :: 1.1.0
This function was added for completeness.
"""
self._axisbelow = True
def grid(self, b=True, **kwargs):
'''
Set / unset 3D grid.
.. note::
Currently, this function does not behave the same as
:meth:`matplotlib.axes.Axes.grid`, but it is intended to
eventually support that behavior.
.. versionchanged :: 1.1.0
This function was changed, but not tested. Please report any bugs.
'''
# TODO: Operate on each axes separately
if len(kwargs) :
b = True
self._draw_grid = cbook._string_to_bool(b)
def ticklabel_format(self, **kwargs) :
"""
Convenience method for manipulating the ScalarFormatter
        used by default for linear axes in Axes3D objects.
See :meth:`matplotlib.axes.Axes.ticklabel_format` for full
documentation. Note that this version applies to all three
axes of the Axes3D object. Therefore, the *axis* argument
will also accept a value of 'z' and the value of 'both' will
apply to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
useOffset = kwargs.pop('useOffset', None)
axis = kwargs.pop('axis', 'both').lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError("comma style remains to be added")
elif style == '':
sb = None
else:
raise ValueError("%s is not a valid style value")
try:
if sb is not None:
                if axis in ['both', 'x']:
self.xaxis.major.formatter.set_scientific(sb)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_scientific(sb)
if axis in ['both', 'z'] :
self.zaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_useOffset(useOffset)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs) :
"""
Convenience method for controlling tick locators.
See :meth:`matplotlib.axes.Axes.locator_params` for full
        documentation. Note that this is for Axes3D objects;
        therefore, setting *axis* to 'both' will result in the
        parameters being set for all three axes. Also, *axis*
        can take a value of 'z' to apply parameters to the
        z axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
_z = axis in ['z', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
if _z:
self.zaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y, scalez=_z)
def tick_params(self, axis='both', **kwargs) :
"""
Convenience method for changing the appearance of ticks and
tick labels.
See :meth:`matplotlib.axes.Axes.tick_params` for more complete
documentation.
The only difference is that setting *axis* to 'both' will
mean that the settings are applied to all three axes. Also,
the *axis* parameter also accepts a value of 'z', which
would mean to apply to only the z-axis.
Also, because of how Axes3D objects are drawn very differently
from regular 2D axes, some of these settings may have
ambiguous meaning. For simplicity, the 'z' axis will
        accept settings as if it were the 'y' axis.
.. note::
While this function is currently implemented, the core part
of the Axes3D object may ignore some of these settings.
Future releases will fix this. Priority will be given to
those who file bugs.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
Axes.tick_params(self, axis, **kwargs)
if axis in ['z', 'both'] :
zkw = dict(kwargs)
zkw.pop('top', None)
zkw.pop('bottom', None)
zkw.pop('labeltop', None)
zkw.pop('labelbottom', None)
self.zaxis.set_tick_params(**zkw)
### data limits, ticks, tick labels, and formatting
def invert_zaxis(self):
"""
Invert the z-axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
self.set_zlim(top, bottom, auto=None)
def zaxis_inverted(self):
'''
Returns True if the z-axis is inverted.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
'''
bottom, top = self.get_zlim()
return top < bottom
def get_zbound(self):
"""
Returns the z-axis numerical bounds where::
lowerBound < upperBound
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_zbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the z-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the :attr:`_autoscaleZon` attribute.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if upper is None and cbook.iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_zbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.zaxis_inverted():
if lower < upper:
self.set_zlim(upper, lower, auto=None)
else:
self.set_zlim(lower, upper, auto=None)
else :
if lower < upper:
self.set_zlim(lower, upper, auto=None)
else :
self.set_zlim(upper, lower, auto=None)
def text(self, x, y, z, s, zdir=None, **kwargs):
'''
Add text to the plot. kwargs will be passed on to Axes.text,
except for the `zdir` keyword, which sets the direction to be
used as the z direction.
'''
text = Axes.text(self, x, y, s, **kwargs)
art3d.text_2d_to_3d(text, z, zdir)
return text
text3D = text
text2D = Axes.text
def plot(self, xs, ys, *args, **kwargs):
'''
Plot 2D or 3D data.
========== ================================================
Argument Description
========== ================================================
*xs*, *ys* X, y coordinates of vertices
*zs* z value(s), either one for all points or one for
each point.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Other arguments are passed on to
:func:`~matplotlib.axes.Axes.plot`
'''
# FIXME: This argument parsing might be better handled
# when we set later versions of python for
# minimum requirements. Currently at 2.4.
# Note that some of the reason for the current difficulty
# is caused by the fact that we want to insert a new
# (semi-optional) positional argument 'Z' right before
# many other traditional positional arguments occur
# such as the color, linestyle and/or marker.
had_data = self.has_data()
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
argsi = 0
# First argument is array of zs
if len(args) > 0 and cbook.iterable(args[0]) and \
len(xs) == len(args[0]) :
# So, we know that it is an array with
# first dimension the same as xs.
# Next, check to see if the data contained
# therein (if any) is scalar (and not another array).
if len(args[0]) == 0 or cbook.is_scalar(args[0][0]) :
zs = args[argsi]
argsi += 1
# First argument is z value
elif len(args) > 0 and cbook.is_scalar(args[0]):
zs = args[argsi]
argsi += 1
# Match length
if not cbook.iterable(zs):
zs = np.ones(len(xs)) * zs
lines = Axes.plot(self, xs, ys, *args[argsi:], **kwargs)
for line in lines:
art3d.line_2d_to_3d(line, zs=zs, zdir=zdir)
self.auto_scale_xyz(xs, ys, zs, had_data)
return lines
plot3D = plot
def plot_surface(self, X, Y, Z, *args, **kwargs):
'''
Create a surface plot.
By default it will be colored in shades of a solid color,
but it also supports color mapping by supplying the *cmap*
argument.
The `rstride` and `cstride` kwargs set the stride used to
sample the input data to generate the graph. If 1k by 1k
        arrays are passed in, the default values for the strides will
result in a 100x100 grid being plotted.
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 2D arrays
*rstride* Array row stride (step size), defaults to 10
*cstride* Array column stride (step size), defaults to 10
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*facecolors* Face colors for the individual patches
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
Other arguments are passed on to
:class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
'''
had_data = self.has_data()
Z = np.atleast_2d(Z)
# TODO: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
rstride = kwargs.pop('rstride', 10)
cstride = kwargs.pop('cstride', 10)
if 'facecolors' in kwargs:
fcolors = kwargs.pop('facecolors')
else:
color = np.array(colorConverter.to_rgba(kwargs.pop('color', 'b')))
fcolors = None
cmap = kwargs.get('cmap', None)
norm = kwargs.pop('norm', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
linewidth = kwargs.get('linewidth', None)
shade = kwargs.pop('shade', cmap is None)
lightsource = kwargs.pop('lightsource', None)
# Shade the data
if shade and cmap is not None and fcolors is not None:
fcolors = self._shade_colors_lightsource(Z, cmap, lightsource)
polys = []
# Only need these vectors to shade if there is no cmap
if cmap is None and shade :
totpts = int(np.ceil(float(rows - 1) / rstride) *
np.ceil(float(cols - 1) / cstride))
v1 = np.empty((totpts, 3))
v2 = np.empty((totpts, 3))
# This indexes the vertex points
which_pt = 0
#colset contains the data for coloring: either average z or the facecolor
colset = []
for rs in xrange(0, rows-1, rstride):
for cs in xrange(0, cols-1, cstride):
ps = []
for a in (X, Y, Z) :
ztop = a[rs,cs:min(cols, cs+cstride+1)]
zleft = a[rs+1:min(rows, rs+rstride+1),
min(cols-1, cs+cstride)]
zbase = a[min(rows-1, rs+rstride), cs:min(cols, cs+cstride+1):][::-1]
zright = a[rs:min(rows-1, rs+rstride):, cs][::-1]
z = np.concatenate((ztop, zleft, zbase, zright))
ps.append(z)
# The construction leaves the array with duplicate points, which
# are removed here.
ps = list(zip(*ps))
lastp = np.array([])
ps2 = [ps[0]] + [ps[i] for i in xrange(1, len(ps)) if ps[i] != ps[i-1]]
avgzsum = sum(p[2] for p in ps2)
polys.append(ps2)
if fcolors is not None:
colset.append(fcolors[rs][cs])
else:
colset.append(avgzsum / len(ps2))
# Only need vectors to shade if no cmap
if cmap is None and shade:
i1, i2, i3 = 0, int(len(ps2)/3), int(2*len(ps2)/3)
v1[which_pt] = np.array(ps2[i1]) - np.array(ps2[i2])
v2[which_pt] = np.array(ps2[i2]) - np.array(ps2[i3])
which_pt += 1
if cmap is None and shade:
normals = np.cross(v1, v2)
else :
normals = []
polyc = art3d.Poly3DCollection(polys, *args, **kwargs)
if fcolors is not None:
if shade:
colset = self._shade_colors(colset, normals)
polyc.set_facecolors(colset)
polyc.set_edgecolors(colset)
elif cmap:
colset = np.array(colset)
polyc.set_array(colset)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
colset = self._shade_colors(color, normals)
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(X, Y, Z, had_data)
return polyc
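    # Hedged usage sketch (editorial addition, not in the upstream source),
    # assuming numpy is imported as np and `ax` is a hypothetical Axes3D:
    #
    #     xs = ys = np.linspace(-2, 2, 41)
    #     X, Y = np.meshgrid(xs, ys)
    #     Z = np.exp(-(X ** 2 + Y ** 2))
    #     surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='jet')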
def _generate_normals(self, polygons):
'''
Generate normals for polygons by using the first three points.
        This normal, of course, might not make sense for polygons with
        more than three points that do not lie in a plane.
'''
normals = []
for verts in polygons:
v1 = np.array(verts[0]) - np.array(verts[1])
v2 = np.array(verts[2]) - np.array(verts[0])
normals.append(np.cross(v1, v2))
return normals
def _shade_colors(self, color, normals):
'''
Shade *color* using normal vectors given by *normals*.
*color* can also be an array of the same length as *normals*.
'''
shade = np.array([np.dot(n / proj3d.mod(n), [-1, -1, 0.5])
if proj3d.mod(n) else np.nan
for n in normals])
mask = ~np.isnan(shade)
if len(shade[mask]) > 0:
norm = Normalize(min(shade[mask]), max(shade[mask]))
shade[~mask] = min(shade[mask])
color = colorConverter.to_rgba_array(color)
# shape of color should be (M, 4) (where M is number of faces)
# shape of shade should be (M,)
# colors should have final shape of (M, 4)
alpha = color[:, 3]
colors = (0.5 + norm(shade)[:, np.newaxis] * 0.5) * color
colors[:, 3] = alpha
else:
colors = np.asanyarray(color).copy()
return colors
def _shade_colors_lightsource(self, data, cmap, lightsource):
if lightsource is None:
lightsource = LightSource(azdeg=135, altdeg=55)
return lightsource.shade(data, cmap)
def plot_wireframe(self, X, Y, Z, *args, **kwargs):
'''
Plot a 3D wireframe.
The `rstride` and `cstride` kwargs set the stride used to
sample the input data to generate the graph.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as 2D arrays
*Z*
*rstride* Array row stride (step size), defaults to 1
*cstride* Array column stride (step size), defaults to 1
========== ================================================
Keyword arguments are passed on to
:class:`~matplotlib.collections.LineCollection`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Line3DCollection`
'''
rstride = kwargs.pop("rstride", 1)
cstride = kwargs.pop("cstride", 1)
had_data = self.has_data()
Z = np.atleast_2d(Z)
# FIXME: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
# We want two sets of lines, one running along the "rows" of
# Z and another set of lines running along the "columns" of Z.
# This transpose will make it easy to obtain the columns.
tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
rii = list(xrange(0, rows, rstride))
cii = list(xrange(0, cols, cstride))
# Add the last index only if needed
if rows > 0 and rii[-1] != (rows - 1) :
rii += [rows-1]
if cols > 0 and cii[-1] != (cols - 1) :
cii += [cols-1]
# If the inputs were empty, then just
# reset everything.
if Z.size == 0 :
rii = []
cii = []
xlines = [X[i] for i in rii]
ylines = [Y[i] for i in rii]
zlines = [Z[i] for i in rii]
txlines = [tX[i] for i in cii]
tylines = [tY[i] for i in cii]
tzlines = [tZ[i] for i in cii]
lines = [list(zip(xl, yl, zl)) for xl, yl, zl in \
zip(xlines, ylines, zlines)]
lines += [list(zip(xl, yl, zl)) for xl, yl, zl in \
zip(txlines, tylines, tzlines)]
linec = art3d.Line3DCollection(lines, *args, **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(X, Y, Z, had_data)
return linec
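    # Hedged usage sketch (editorial addition, not in the upstream source),
    # reusing the X, Y, Z grids from the plot_surface note above:
    #
    #     wire = ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2, color='0.4')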
def plot_trisurf(self, *args, **kwargs):
"""
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 1D arrays
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
The (optional) triangulation can be specified in one of two ways;
either::
plot_trisurf(triangulation, ...)
where triangulation is a :class:`~matplotlib.tri.Triangulation`
object, or::
plot_trisurf(X, Y, ...)
plot_trisurf(X, Y, triangles, ...)
plot_trisurf(X, Y, triangles=triangles, ...)
in which case a Triangulation object will be created. See
        :class:`~matplotlib.tri.Triangulation` for an explanation of
these possibilities.
The remaining arguments are::
plot_trisurf(..., Z)
where *Z* is the array of values to contour, one per point
in the triangulation.
Other arguments are passed on to
:class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
**Examples:**
.. plot:: mpl_examples/mplot3d/trisurf3d_demo.py
.. plot:: mpl_examples/mplot3d/trisurf3d_demo2.py
.. versionadded:: 1.2.0
This plotting function was added for the v1.2.0 release.
"""
had_data = self.has_data()
# TODO: Support custom face colours
color = np.array(colorConverter.to_rgba(kwargs.pop('color', 'b')))
cmap = kwargs.get('cmap', None)
norm = kwargs.pop('norm', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
linewidth = kwargs.get('linewidth', None)
shade = kwargs.pop('shade', cmap is None)
lightsource = kwargs.pop('lightsource', None)
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
if 'Z' in kwargs:
z = np.asarray(kwargs.pop('Z'))
else:
z = np.asarray(args[0])
# We do this so Z doesn't get passed as an arg to PolyCollection
args = args[1:]
triangles = tri.get_masked_triangles()
xt = tri.x[triangles][..., np.newaxis]
yt = tri.y[triangles][..., np.newaxis]
zt = z[triangles][..., np.newaxis]
verts = np.concatenate((xt, yt, zt), axis=2)
# Only need these vectors to shade if there is no cmap
if cmap is None and shade:
totpts = len(verts)
v1 = np.empty((totpts, 3))
v2 = np.empty((totpts, 3))
# This indexes the vertex points
which_pt = 0
colset = []
for i in xrange(len(verts)):
avgzsum = verts[i,0,2] + verts[i,1,2] + verts[i,2,2]
colset.append(avgzsum / 3.0)
# Only need vectors to shade if no cmap
if cmap is None and shade:
v1[which_pt] = np.array(verts[i,0]) - np.array(verts[i,1])
v2[which_pt] = np.array(verts[i,1]) - np.array(verts[i,2])
which_pt += 1
if cmap is None and shade:
normals = np.cross(v1, v2)
else:
normals = []
polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
if cmap:
colset = np.array(colset)
polyc.set_array(colset)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
colset = self._shade_colors(color, normals)
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(tri.x, tri.y, z, had_data)
return polyc
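    # Hedged usage sketch (editorial addition, not in the upstream source):
    # scattered samples are triangulated implicitly, assuming numpy as np and
    # a hypothetical Axes3D instance `ax`:
    #
    #     x = np.random.rand(100)
    #     y = np.random.rand(100)
    #     z = np.sin(3 * x) * np.cos(3 * y)
    #     ax.plot_trisurf(x, y, z, cmap='jet', linewidth=0.1)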
def _3d_extend_contour(self, cset, stride=5):
'''
        Extend a contour in 3D by creating shaded polygon walls between
        each contour line lifted to z - dz and z + dz, where dz is half
        the spacing between adjacent contour levels.
'''
levels = cset.levels
colls = cset.collections
dz = (levels[1] - levels[0]) / 2
for z, linec in zip(levels, colls):
topverts = art3d.paths_to_3d_segments(linec.get_paths(), z - dz)
botverts = art3d.paths_to_3d_segments(linec.get_paths(), z + dz)
color = linec.get_color()[0]
polyverts = []
normals = []
nsteps = round(len(topverts[0]) / stride)
if nsteps <= 1:
if len(topverts[0]) > 1:
nsteps = 2
else:
continue
stepsize = (len(topverts[0]) - 1) / (nsteps - 1)
for i in range(int(round(nsteps)) - 1):
i1 = int(round(i * stepsize))
i2 = int(round((i + 1) * stepsize))
polyverts.append([topverts[0][i1],
topverts[0][i2],
botverts[0][i2],
botverts[0][i1]])
v1 = np.array(topverts[0][i1]) - np.array(topverts[0][i2])
v2 = np.array(topverts[0][i1]) - np.array(botverts[0][i1])
normals.append(np.cross(v1, v2))
colors = self._shade_colors(color, normals)
colors2 = self._shade_colors(color, normals)
polycol = art3d.Poly3DCollection(polyverts,
facecolors=colors,
edgecolors=colors2)
polycol.set_sort_zpos(z)
self.add_collection3d(polycol)
for col in colls:
self.collections.remove(col)
def add_contour_set(self, cset, extend3d=False, stride=5, zdir='z', offset=None):
zdir = '-' + zdir
if extend3d:
self._3d_extend_contour(cset, stride)
else:
for z, linec in zip(cset.levels, cset.collections):
if offset is not None:
z = offset
art3d.line_collection_2d_to_3d(linec, z, zdir=zdir)
def add_contourf_set(self, cset, zdir='z', offset=None) :
zdir = '-' + zdir
for z, linec in zip(cset.levels, cset.collections) :
if offset is not None :
z = offset
art3d.poly_collection_2d_to_3d(linec, z, zdir=zdir)
linec.set_sort_zpos(z)
def contour(self, X, Y, Z, *args, **kwargs):
'''
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
The positional and other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contour`
Returns a :class:`~matplotlib.axes.Axes.contour`
'''
extend3d = kwargs.pop('extend3d', False)
stride = kwargs.pop('stride', 5)
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = Axes.contour(self, jX, jY, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contour3D = contour
def tricontour(self, *args, **kwargs):
"""
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged:: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
extend3d = kwargs.pop('extend3d', False)
stride = kwargs.pop('stride', 5)
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontour
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = Axes.tricontour(self, tri, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def contourf(self, X, Y, Z, *args, **kwargs):
'''
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the filled contour
on this position in plane normal to zdir
========== ================================================
The positional and keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contourf`
Returns a :class:`~matplotlib.axes.Axes.contourf`
.. versionchanged :: 1.1.0
The *zdir* and *offset* kwargs were added.
'''
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = Axes.contourf(self, jX, jY, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contourf3D = contourf
def tricontourf(self, *args, **kwargs):
"""
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged :: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontourf
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = Axes.tricontourf(self, tri, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def add_collection3d(self, col, zs=0, zdir='z'):
'''
Add a 3D collection object to the plot.
2D collection types are converted to a 3D version by
modifying the object and adding z coordinate information.
Supported are:
- PolyCollection
- LineCollection
- PatchCollection
'''
zvals = np.atleast_1d(zs)
if len(zvals) > 0:
zsortval = min(zvals)
else:
zsortval = 0 # FIXME: Fairly arbitrary. Is there a better value?
# FIXME: use issubclass() (although, then a 3D collection
# object would also pass.) Maybe have a collection3d
# abstract class to test for and exclude?
if type(col) is mcoll.PolyCollection:
art3d.poly_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.LineCollection:
art3d.line_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.PatchCollection:
art3d.patch_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
Axes.add_collection(self, col)
def scatter(self, xs, ys, zs=0, zdir='z', s=20, c='b', depthshade=True,
*args, **kwargs):
'''
Create a scatter plot.
============ ========================================================
Argument Description
============ ========================================================
*xs*, *ys* Positions of data points.
*zs* Either an array of the same length as *xs* and
*ys* or a single value to place all points in
the same plane. Default is 0.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
*s* size in points^2. It is a scalar or an array of the
same length as *x* and *y*.
*c* a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*depthshade*
Whether or not to shade the scatter markers to give
the appearance of depth. Default is *True*.
============ ========================================================
Keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.scatter`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
xs = np.ma.ravel(xs)
ys = np.ma.ravel(ys)
zs = np.ma.ravel(zs)
if xs.size != ys.size:
raise ValueError("Arguments 'xs' and 'ys' must be of same size.")
if xs.size != zs.size:
if zs.size == 1:
zs = np.tile(zs[0], xs.size)
else:
raise ValueError(("Argument 'zs' must be of same size as 'xs' "
"and 'ys' or of size 1."))
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
cstr = cbook.is_string_like(c) or cbook.is_sequence_of_strings(c)
if not cstr:
c = np.asanyarray(c)
if c.size == xs.size:
c = np.ma.ravel(c)
xs, ys, zs, s, c = cbook.delete_masked_points(xs, ys, zs, s, c)
patches = Axes.scatter(self, xs, ys, s=s, c=c, *args, **kwargs)
if not cbook.iterable(zs):
is_2d = True
zs = np.ones(len(xs)) * zs
else:
is_2d = False
art3d.patch_collection_2d_to_3d(patches, zs=zs, zdir=zdir,
depthshade=depthshade)
if self._zmargin < 0.05 and xs.size > 0:
self.set_zmargin(0.05)
#FIXME: why is this necessary?
if not is_2d:
self.auto_scale_xyz(xs, ys, zs, had_data)
return patches
scatter3D = scatter
def bar(self, left, height, zs=0, zdir='z', *args, **kwargs):
'''
Add 2D bar(s).
========== ================================================
Argument Description
========== ================================================
*left* The x coordinates of the left sides of the bars.
*height* The height of the bars.
*zs* Z coordinate of bars, if one value is specified
they will all be placed at the same z.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Keyword arguments are passed onto :func:`~matplotlib.axes.Axes.bar`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
patches = Axes.bar(self, left, height, *args, **kwargs)
if not cbook.iterable(zs):
zs = np.ones(len(left)) * zs
verts = []
verts_zs = []
for p, z in zip(patches, zs):
vs = art3d.get_patch_verts(p)
verts += vs.tolist()
verts_zs += [z] * len(vs)
art3d.patch_2d_to_3d(p, z, zdir)
if 'alpha' in kwargs:
p.set_alpha(kwargs['alpha'])
if len(verts) > 0:
# the following has to be skipped if verts is empty
# NOTE: Bugs could still occur if len(verts) > 0,
# but the "2nd dimension" is empty.
xs, ys = list(zip(*verts))
else:
xs, ys = [], []
xs, ys, verts_zs = art3d.juggle_axes(xs, ys, verts_zs, zdir)
self.auto_scale_xyz(xs, ys, verts_zs, had_data)
return patches
def bar3d(self, x, y, z, dx, dy, dz, color='b',
zsort='average', *args, **kwargs):
'''
Generate a 3D bar, or multiple bars.
When generating multiple bars, x, y, z have to be arrays.
dx, dy, dz can be arrays or scalars.
*color* can be:
- A single color value, to color all bars the same color.
- An array of colors of length N bars, to color each bar
independently.
- An array of colors of length 6, to color the faces of the
bars similarly.
- An array of colors of length 6 * N bars, to color each face
independently.
When coloring the faces of the boxes specifically, this is
the order of the coloring:
1. -Z (bottom of box)
2. +Z (top of box)
3. -Y
4. +Y
5. -X
6. +X
Keyword arguments are passed onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
'''
had_data = self.has_data()
if not cbook.iterable(x):
x = [x]
if not cbook.iterable(y):
y = [y]
if not cbook.iterable(z):
z = [z]
if not cbook.iterable(dx):
dx = [dx]
if not cbook.iterable(dy):
dy = [dy]
if not cbook.iterable(dz):
dz = [dz]
if len(dx) == 1:
dx = dx * len(x)
if len(dy) == 1:
dy = dy * len(y)
if len(dz) == 1:
dz = dz * len(z)
if len(x) != len(y) or len(x) != len(z):
warnings.warn('x, y, and z must be the same length.')
# FIXME: This is archaic and could be done much better.
minx, miny, minz = 1e20, 1e20, 1e20
maxx, maxy, maxz = -1e20, -1e20, -1e20
polys = []
for xi, yi, zi, dxi, dyi, dzi in zip(x, y, z, dx, dy, dz):
minx = min(xi, minx)
maxx = max(xi + dxi, maxx)
miny = min(yi, miny)
maxy = max(yi + dyi, maxy)
minz = min(zi, minz)
maxz = max(zi + dzi, maxz)
polys.extend([
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi + dyi, zi), (xi, yi + dyi, zi)),
((xi, yi, zi + dzi), (xi + dxi, yi, zi + dzi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi, zi + dzi), (xi, yi, zi + dzi)),
((xi, yi + dyi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi, yi + dyi, zi),
(xi, yi + dyi, zi + dzi), (xi, yi, zi + dzi)),
((xi + dxi, yi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi + dxi, yi, zi + dzi)),
])
facecolors = []
if color is None:
# no color specified
facecolors = [None] * len(x)
elif len(color) == len(x):
# bar colors specified, need to expand to number of faces
for c in color:
facecolors.extend([c] * 6)
else:
# a single color specified, or face colors specified explicitly
facecolors = list(colorConverter.to_rgba_array(color))
if len(facecolors) < len(x):
facecolors *= (6 * len(x))
normals = self._generate_normals(polys)
sfacecolors = self._shade_colors(facecolors, normals)
col = art3d.Poly3DCollection(polys,
zsort=zsort,
facecolor=sfacecolors,
*args, **kwargs)
self.add_collection(col)
self.auto_scale_xyz((minx, maxx), (miny, maxy), (minz, maxz), had_data)
def set_title(self, label, fontdict=None, loc='center', **kwargs):
ret = Axes.set_title(self, label, fontdict=fontdict, loc=loc, **kwargs)
(x, y) = self.title.get_position()
self.title.set_y(0.92 * y)
return ret
set_title.__doc__ = maxes.Axes.set_title.__doc__
def quiver(self, *args, **kwargs):
"""
Plot a 3D field of arrows.
call signatures::
quiver(X, Y, Z, U, V, W, **kwargs)
Arguments:
*X*, *Y*, *Z*:
The x, y and z coordinates of the arrow locations
*U*, *V*, *W*:
The direction vector that the arrow is pointing
The arguments can be array-like or scalars, so long as they
can be broadcast together. The arguments can also be
masked arrays. If an element in any of the arguments is masked,
then the corresponding quiver element will not be plotted.
Keyword arguments:
*length*: [1.0 | float]
The length of each quiver; defaults to 1.0, in the same
units as the axes.
*arrow_length_ratio*: [0.3 | float]
The ratio of the arrow head length to the quiver length;
defaults to 0.3.
Any additional keyword arguments are delegated to
:class:`~matplotlib.collections.LineCollection`
"""
def calc_arrow(u, v, w, angle=15):
"""
Calculate the arrow head directions. (u, v, w) should be a unit vector.
"""
# this part figures out the axis of rotation to use
# use unit vector perpendicular to (u,v,w) when |w|=1, by default
x, y, z = 0, 1, 0
# get the norm
norm = math.sqrt(v**2 + u**2)
# normalize it if it is safe
if norm > 0:
# get unit direction vector perpendicular to (u,v,w)
x, y = v/norm, -u/norm
# this function takes an angle, and rotates the (u,v,w)
# angle degrees around (x,y,z)
def rotatefunction(angle):
ra = math.radians(angle)
c = math.cos(ra)
s = math.sin(ra)
# construct the rotation matrix
R = np.matrix([[c+(x**2)*(1-c), x*y*(1-c)-z*s, x*z*(1-c)+y*s],
[y*x*(1-c)+z*s, c+(y**2)*(1-c), y*z*(1-c)-x*s],
[z*x*(1-c)-y*s, z*y*(1-c)+x*s, c+(z**2)*(1-c)]])
# construct the column vector for (u,v,w)
line = np.matrix([[u],[v],[w]])
# use numpy to multiply them to get the rotated vector
rotatedline = R*line
# return the rotated (u,v,w) from the computed matrix
return (rotatedline[0,0], rotatedline[1,0], rotatedline[2,0])
# compute and return the two arrowhead direction unit vectors
return rotatefunction(angle), rotatefunction(-angle)
def point_vector_to_line(point, vector, length):
"""
use a point and vector to generate lines
"""
lines = []
for var in np.linspace(0, length, num=2):
lines.append(list(zip(*(point - var * vector))))
lines = np.array(lines).swapaxes(0, 1)
return lines.tolist()
had_data = self.has_data()
# handle kwargs
# shaft length
length = kwargs.pop('length', 1)
# arrow length ratio to the shaft length
arrow_length_ratio = kwargs.pop('arrow_length_ratio', 0.3)
# handle args
argi = 6
if len(args) < argi:
raise ValueError('Wrong number of arguments. Expected %d got %d' %
(argi, len(args)))
# first 6 arguments are X, Y, Z, U, V, W
input_args = args[:argi]
# if any of the args are scalar, convert into list
input_args = [[k] if isinstance(k, (int, float)) else k
for k in input_args]
# extract the masks, if any
masks = [k.mask for k in input_args if isinstance(k, np.ma.MaskedArray)]
# broadcast to match the shape
bcast = np.broadcast_arrays(*(input_args + masks))
input_args = bcast[:argi]
masks = bcast[argi:]
if masks:
# combine the masks into one
mask = reduce(np.logical_or, masks)
# put mask on and compress
input_args = [np.ma.array(k, mask=mask).compressed()
for k in input_args]
else:
input_args = [k.flatten() for k in input_args]
if any(len(v) == 0 for v in input_args):
# No quivers, so just make an empty collection and return early
linec = art3d.Line3DCollection([], *args[argi:], **kwargs)
self.add_collection(linec)
return linec
# Following assertions must be true before proceeding
# must all be ndarray
assert all(isinstance(k, np.ndarray) for k in input_args)
# must all be of the same shape
assert len(set([k.shape for k in input_args])) == 1
xs, ys, zs, us, vs, ws = input_args[:argi]
lines = []
# for each arrow
for i in range(xs.shape[0]):
# calculate the arrow body
x = xs[i]
y = ys[i]
z = zs[i]
u = us[i]
v = vs[i]
w = ws[i]
# (u, v, w) is normalized below; a zero-length vector cannot be normalized.
if u == 0 and v == 0 and w == 0:
# Just don't make a quiver for such a case.
continue
# normalize
norm = math.sqrt(u ** 2 + v ** 2 + w ** 2)
u /= norm
v /= norm
w /= norm
# draw main line
t = np.linspace(0, length, num=20)
lx = x - t * u
ly = y - t * v
lz = z - t * w
line = list(zip(lx, ly, lz))
lines.append(line)
d1, d2 = calc_arrow(u, v, w)
ua1, va1, wa1 = d1[0], d1[1], d1[2]
ua2, va2, wa2 = d2[0], d2[1], d2[2]
# TODO: num should probably get parameterized
t = np.linspace(0, length * arrow_length_ratio, num=20)
la1x = x - t * ua1
la1y = y - t * va1
la1z = z - t * wa1
la2x = x - t * ua2
la2y = y - t * va2
la2z = z - t * wa2
line = list(zip(la1x, la1y, la1z))
lines.append(line)
line = list(zip(la2x, la2y, la2z))
lines.append(line)
linec = art3d.Line3DCollection(lines, *args[argi:], **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(xs, ys, zs, had_data)
return linec
quiver3D = quiver
def get_test_data(delta=0.05):
'''
Return a tuple X, Y, Z with a test data set.
'''
from matplotlib.mlab import bivariate_normal
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1
X = X * 10
Y = Y * 10
Z = Z * 500
return X, Y, Z
########################################################
# Register Axes3D as a 'projection' object available
# for use just like any other axes
########################################################
import matplotlib.projections as proj
proj.projection_registry.register(Axes3D)
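# ----------------------------------------------------------------------
# Editor's note: the function below is an illustrative usage sketch of the
# Axes3D API defined in this module; it is NOT part of the original
# matplotlib source. It assumes matplotlib with the mplot3d toolkit and
# numpy are available; the helper name is an editor-chosen placeholder and
# it is never called automatically.
def _axes3d_usage_sketch():
    import numpy as np
    import matplotlib.pyplot as plt

    fig = plt.figure()
    # the '3d' projection is available thanks to the registration above
    ax = fig.add_subplot(111, projection='3d')

    # 3D contour lines over the module's built-in test surface
    X, Y, Z = get_test_data(0.1)
    ax.contour3D(X, Y, Z, 20)

    # depth-shaded scatter and a single quiver arrow
    xs, ys, zs = np.random.rand(3, 30)
    ax.scatter(xs, ys, zs, depthshade=True)
    ax.quiver([0.5], [0.5], [0.5], [1.0], [1.0], [1.0], length=0.2)

    plt.show()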
| mit |
nhejazi/scikit-learn | sklearn/preprocessing/tests/test_data.py | 12 | 75601 | # Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import QuantileTransformer
from sklearn.preprocessing.data import quantile_transform
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
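# Editor's sketch (not part of the original test suite): a minimal worked
# example of the degree-2 expansion exercised above, using only the public
# PolynomialFeatures API; the helper name is an editor-chosen placeholder.
def _polynomial_features_worked_example():
    X = np.array([[2., 3.]])
    poly = PolynomialFeatures(degree=2, include_bias=True)
    # output columns: 1, x0, x1, x0^2, x0*x1, x1^2
    expected = np.array([[1., 2., 3., 4., 6., 9.]])
    assert_array_almost_equal(poly.fit_transform(X), expected)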
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names(
[u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
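# Editor's sketch (not part of the original test suite): StandardScaler is
# equivalent to the explicit (X - mean) / std computation (population std,
# i.e. ddof=0), illustrated on a slice of the shared toy data. The helper
# name is an editor-chosen placeholder.
def _standard_scaler_formula_sketch():
    X = X_2d[:10, :3]
    manual = (X - X.mean(axis=0)) / X.std(axis=0)
    assert_array_almost_equal(StandardScaler().fit_transform(X), manual)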
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
# np.log(1e-5) is taken because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std until the end of the partial fits
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
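# Editor's sketch (not part of the original test suite): partial_fit lets a
# scaler be fitted incrementally when the data arrives in chunks; the
# streamed statistics converge to the batch statistics. The helper name is
# an editor-chosen placeholder.
def _partial_fit_streaming_sketch():
    scaler = StandardScaler()
    for batch in gen_batches(n_samples, 100):
        scaler.partial_fit(X_2d[batch])
    assert_array_almost_equal(scaler.mean_, X_2d.mean(axis=0))
    assert_array_almost_equal(scaler.var_, X_2d.var(axis=0))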
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
# Test std until the end of the partial fits
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
# Test whether the incremental computation introduces significant errors
# for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
# Regardless of absolute values, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
# Regardless of magnitude, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_transform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
# (i+1) because the scaler has already been fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
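# Editor's sketch (not part of the original test suite): with the default
# feature_range=(0, 1), MinMaxScaler is the explicit rescaling
# (X - X.min) / (X.max - X.min), illustrated on the iris data used above.
# The helper name is an editor-chosen placeholder.
def _minmax_scaler_formula_sketch():
    X = iris.data
    manual = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    assert_array_almost_equal(MinMaxScaler().fit_transform(X), manual)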
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.nan, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [[np.inf, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
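# Editor's sketch (not part of the original test suite): RobustScaler centers
# on the per-feature median and scales by the interquartile range, which the
# explicit computation below reproduces (using np.percentile's default
# interpolation, as RobustScaler does). The helper name is an editor-chosen
# placeholder.
def _robust_scaler_formula_sketch():
    X = iris.data
    q25, q75 = np.percentile(X, (25, 75), axis=0)
    manual = (X - np.median(X, axis=0)) / (q75 - q25)
    assert_array_almost_equal(RobustScaler().fit_transform(X), manual)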
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_quantile_transform_iris():
X = iris.data
# uniform output distribution
transformer = QuantileTransformer(n_quantiles=30)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# normal output distribution
transformer = QuantileTransformer(n_quantiles=30,
output_distribution='normal')
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure it is possible to take the inverse of a sparse matrix
# which contains negative values; this is the case for the iris dataset
X_sparse = sparse.csc_matrix(X)
X_sparse_tran = transformer.fit_transform(X_sparse)
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)
def test_quantile_transform_check_error():
X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X = sparse.csc_matrix(X)
X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X_neg = sparse.csc_matrix(X_neg)
assert_raises_regex(ValueError, "Invalid value for 'n_quantiles': 0.",
QuantileTransformer(n_quantiles=0).fit, X)
assert_raises_regex(ValueError, "Invalid value for 'subsample': 0.",
QuantileTransformer(subsample=0).fit, X)
assert_raises_regex(ValueError, "The number of quantiles cannot be"
" greater than the number of samples used. Got"
" 1000 quantiles and 10 samples.",
QuantileTransformer(subsample=10).fit, X)
transformer = QuantileTransformer(n_quantiles=10)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.fit, X_neg)
transformer.fit(X)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.transform, X_neg)
X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.transform, X_bad_feat)
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.inverse_transform, X_bad_feat)
transformer = QuantileTransformer(n_quantiles=10,
output_distribution='rnd')
# check that an error is raised at fit time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.fit, X)
# check that an error is raised at transform time
transformer.output_distribution = 'uniform'
transformer.fit(X)
X_tran = transformer.transform(X)
transformer.output_distribution = 'rnd'
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.transform, X)
# check that an error is raised at inverse_transform time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.inverse_transform, X_tran)
def test_quantile_transform_sparse_ignore_zeros():
X = np.array([[0, 1],
[0, 0],
[0, 2],
[0, 2],
[0, 1]])
X_sparse = sparse.csc_matrix(X)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
# dense case -> warning raised
assert_warns_message(UserWarning, "'ignore_implicit_zeros' takes effect"
" only with sparse matrix. This parameter has no"
" effect.", transformer.fit, X)
X_expected = np.array([[0, 0],
[0, 0],
[0, 1],
[0, 1],
[0, 0]])
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
# consider the case where sparse (implicit) entries are missing values
# while explicit user-given zeros are kept as data
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0., 0.5],
[0., 0.],
[0., 1.],
[0., 1.],
[0., 0.5],
[0., 0.],
[0., 0.5],
[0., 1.],
[0., 0.]])
assert_almost_equal(X_expected, X_trans.A)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0, 1],
[0, 0.375],
[0, 0.375],
[0, 0.375],
[0, 1],
[0, 0],
[0, 1]])
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
# check in conjunction with subsampling
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5,
subsample=8,
random_state=0)
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
def test_quantile_transform_dense_toy():
X = np.array([[0, 2, 2.6],
[25, 4, 4.1],
[50, 6, 2.3],
[75, 8, 9.5],
[100, 10, 0.1]])
transformer = QuantileTransformer(n_quantiles=5)
transformer.fit(X)
# using a uniform output, each entry of X should be mapped between 0 and 1
# and equally spaced
X_trans = transformer.fit_transform(X)
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
X_test = np.array([
[-1, 1, 0],
[101, 11, 10],
])
X_expected = np.array([
[0, 0, 0],
[1, 1, 1],
])
assert_array_almost_equal(transformer.transform(X_test), X_expected)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
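# Editor's sketch (not part of the original test suite): with the default
# uniform output distribution, quantile_transform maps a heavily skewed
# feature onto [0, 1] with a roughly uniform distribution (median close to
# 0.5). The helper name is an editor-chosen placeholder.
def _quantile_transform_uniform_sketch():
    rng = np.random.RandomState(0)
    X = rng.lognormal(size=(1000, 1))  # heavily skewed input
    X_trans = quantile_transform(X, n_quantiles=1000, copy=True)
    assert_greater_equal(X_trans.min(), 0.)
    assert_less_equal(X_trans.max(), 1.)
    assert_almost_equal(np.median(X_trans), 0.5, decimal=2)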
def test_quantile_transform_subsampling():
# Test that subsampling the input yields consistent results. We check
# that the computed quantiles are almost mapped to a [0, 1] vector where
# values are equally spaced. The infinity norm is checked to be smaller
# than a given threshold. This is repeated 5 times.
# dense support
n_samples = 1000000
n_quantiles = 1000
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
ROUND = 5
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert_true(inf_norm < 1e-2)
inf_norm_arr.append(inf_norm)
# each random subsampling yields a unique approximation to the expected
# linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
# sparse support
# TODO: rng should be seeded once we drop support for older versions of
# scipy (< 0.13) that don't support seeding.
X = sparse.rand(n_samples, 1, density=.99, format='csc')
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert_true(inf_norm < 1e-1)
inf_norm_arr.append(inf_norm)
# each random subsampling yields a unique approximation to the expected
# linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
def test_quantile_transform_sparse_toy():
X = np.array([[0., 2., 0.],
[25., 4., 0.],
[50., 0., 2.6],
[0., 0., 4.1],
[0., 6., 0.],
[0., 8., 0.],
[75., 0., 2.3],
[0., 10., 0.],
[0., 0., 9.5],
[100., 0., 0.1]])
X = sparse.csc_matrix(X)
transformer = QuantileTransformer(n_quantiles=10)
transformer.fit(X)
X_trans = transformer.fit_transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
transformer_dense = QuantileTransformer(n_quantiles=10).fit(
X.toarray())
X_trans = transformer_dense.transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer_dense.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
def test_quantile_transform_axis1():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
def test_quantile_transform_bounds():
# Lower and upper bounds are manually mapped. We check that in the case
# of a constant feature and a binary feature, the bounds are properly mapped.
X_dense = np.array([[0, 0],
[0, 0],
[1, 0]])
X_sparse = sparse.csc_matrix(X_dense)
# check sparse and dense are consistent
X_trans = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_dense)
assert_array_almost_equal(X_trans, X_dense)
X_trans_sp = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_sparse)
assert_array_almost_equal(X_trans_sp.A, X_dense)
assert_array_almost_equal(X_trans, X_trans_sp.A)
# check the consistency of the bounds by learning on 1 matrix
# and transforming another
X = np.array([[0, 1],
[0, 0.5],
[1, 0]])
X1 = np.array([[0, 0.1],
[0, 0.5],
[1, 0.1]])
transformer = QuantileTransformer(n_quantiles=3).fit(X)
X_trans = transformer.transform(X1)
assert_array_almost_equal(X_trans, X1)
# check that values outside of the range learned will be mapped properly.
X = np.random.random((1000, 1))
transformer = QuantileTransformer()
transformer.fit(X)
assert_equal(transformer.transform(-10), transformer.transform(np.min(X)))
assert_equal(transformer.transform(10), transformer.transform(np.max(X)))
assert_equal(transformer.inverse_transform(-10),
transformer.inverse_transform(
np.min(transformer.references_)))
assert_equal(transformer.inverse_transform(10),
transformer.inverse_transform(
np.max(transformer.references_)))
def test_quantile_transform_and_inverse():
# iris dataset
X = iris.data
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, 'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
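    # Worked example for the third column [0.5, -0.1, 1.1]: the median is 0.5
    # and np.percentile (linear interpolation) gives 25th/75th percentiles of
    # 0.2/0.8, so the IQR is 0.6 and the scaled values are
    # (0.5 - 0.5)/0.6 = 0, (-0.1 - 0.5)/0.6 = -1 and (1.1 - 0.5)/0.6 = +1.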
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
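    # each column is divided by its maximum absolute value (here 1, 1, 100
    # and 2); the all-zero first column is left unchanged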
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
        # Test max_abs at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs until the end of the partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
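    # the stored entries of row 3 are now explicit zeros, so the sparsity
    # structure still reports values for that row (the "unpruned" case)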
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
assert_raises(NotImplementedError, normalize, X_sparse,
norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])
# did the pipeline set the _pairwise attribute?
assert_true(pipeline._pairwise)
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
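    # with auto n_values the per-feature maxima are 3, 2 and 1, giving
    # feature_indices_ = cumsum([0, 4, 3, 2]) = [0, 4, 7, 9]; active_features_
    # keeps only the columns for category values actually seen in X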
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
error_msg = "unknown categorical feature present \[2\] during transform."
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def test_transform_selected_copy_arg():
# transformer that alters X
def _mutating_transformer(X):
X[0, 0] = X[0, 0] + 1
return X
original_X = np.asarray([[1, 2], [3, 4]])
expected_Xtr = [[2, 2], [3, 4]]
X = original_X.copy()
Xtr = _transform_selected(X, _mutating_transformer, copy=True,
selected='all')
assert_array_equal(toarray(X), toarray(original_X))
assert_array_equal(toarray(Xtr), expected_Xtr)
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
    # Raise error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
def test_quantile_transform_valid_axis():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
assert_raises_regex(ValueError, "axis should be either equal to 0 or 1"
". Got axis=2", quantile_transform, X.T, axis=2)
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/datasets/randhie/data.py | 25 | 2667 | """RAND Health Insurance Experiment Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is in the public domain."""
TITLE = __doc__
SOURCE = """
The data was collected by the RAND corporation as part of the Health
Insurance Experiment (HIE).
http://www.rand.org/health/projects/hie.html
This data was used in::
    Cameron, A.C. and Trivedi, P.K. 2005. `Microeconometrics: Methods
and Applications,` Cambridge: New York.
And was obtained from: <http://cameron.econ.ucdavis.edu/mmabook/mmadata.html>
See randhie/src for the original data and description. The data included
here contains only a subset of the original data. The data varies slightly
compared to that reported in Cameron and Trivedi.
"""
DESCRSHORT = """The RAND Co. Health Insurance Experiment Data"""
DESCRLONG = """"""
NOTE = """::
Number of observations - 20,190
Number of variables - 10
Variable name definitions::
mdvis - Number of outpatient visits to an MD
    lncoins - ln(coinsurance + 1), 0 <= coinsurance <= 100
idp - 1 if individual deductible plan, 0 otherwise
lpi - ln(max(1, annual participation incentive payment))
fmde - 0 if idp = 1; ln(max(1, MDE/(0.01 coinsurance))) otherwise
physlm - 1 if the person has a physical limitation
disea - number of chronic diseases
hlthg - 1 if self-rated health is good
hlthf - 1 if self-rated health is fair
hlthp - 1 if self-rated health is poor
(Omitted category is excellent self-rated health)
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
PATH = '%s/%s' % (dirname(abspath(__file__)), 'randhie.csv')
def load():
"""
Loads the RAND HIE data and returns a Dataset class.
    Returns
    -------
    Dataset instance:
        a class of the data with array attributes 'endog' (response
        variable, mdvis) and 'exog' (design).
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Loads the RAND HIE data and returns a Dataset class.
    Returns
    -------
    Dataset instance:
        a class of the data with array attributes 'endog' (response
        variable, mdvis) and 'exog' (design).
"""
from pandas import read_csv
data = read_csv(PATH)
return du.process_recarray_pandas(data, endog_idx=0)
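# Example usage (hypothetical interactive session):
# >>> from statsmodels.datasets import randhie
# >>> dataset = randhie.load_pandas()
# >>> dataset.endog.head()  # number of outpatient MD visits (mdvis)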
def _get_data():
data = recfromtxt(open(PATH, "rb"), delimiter=",", names=True, dtype=float)
return data
| bsd-3-clause |
MingdaZhou/gnuradio | gr-utils/python/utils/plot_psd_base.py | 75 | 12725 | #!/usr/bin/env python
#
# Copyright 2007,2008,2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
from scipy import log10
from gnuradio.eng_option import eng_option
class plot_psd_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.psdfftsize = options.psd_size
self.specfftsize = options.spec_size
self.dospec = options.enable_spec # if we want to plot the spectrogram
self.datatype = getattr(scipy, datatype) #scipy.complex64
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.95, ("File: %s" % filename),
weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.92, "File Position: ",
weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.92, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.915, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = scipy.array(self.sp_iq.get_xlim())
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % self.position)
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
return False
else:
# retesting length here as newer version of scipy does not throw a MemoryError, just
# returns a zero-length array
if(len(self.iq) > 0):
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.iq_psd, self.freq = self.dopsd(self.iq)
return True
else:
print "End of File"
return False
def dopsd(self, iq):
''' Need to do this here and plot later so we can do the fftshift '''
overlap = self.psdfftsize/4
winfunc = scipy.blackman
psd,freq = mlab.psd(iq, self.psdfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.psdfftsize),
noverlap = overlap)
psd = 10.0*log10(abs(psd))
return (psd, freq)
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
iqdims = [[0.075, 0.2, 0.4, 0.6], [0.075, 0.55, 0.4, 0.3]]
psddims = [[0.575, 0.2, 0.4, 0.6], [0.575, 0.55, 0.4, 0.3]]
specdims = [0.2, 0.125, 0.6, 0.3]
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=iqdims[self.dospec])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for PSD plot
self.sp_psd = self.fig.add_subplot(2,2,2, position=psddims[self.dospec])
self.sp_psd.set_title(("PSD"), fontsize=self.title_font_size, fontweight="bold")
self.sp_psd.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_psd.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
r = self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time(self.time, self.iq) # draw the plot
self.plot_psd = self.sp_psd.plot([], 'b') # make plot for PSD
self.draw_psd(self.freq, self.iq_psd) # draw the plot
if self.dospec:
# Subplot for spectrogram plot
self.sp_spec = self.fig.add_subplot(2,2,3, position=specdims)
self.sp_spec.set_title(("Spectrogram"), fontsize=self.title_font_size, fontweight="bold")
self.sp_spec.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_spec.set_ylabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.draw_spec(self.time, self.iq)
draw()
def draw_time(self, t, iq):
reals = iq.real
imags = iq.imag
self.plot_iq[0].set_data([t, reals])
self.plot_iq[1].set_data([t, imags])
self.sp_iq.set_xlim(t.min(), t.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_psd(self, f, p):
self.plot_psd[0].set_data([f, p])
self.sp_psd.set_ylim([p.min()-10, p.max()+10])
self.sp_psd.set_xlim([f.min(), f.max()])
def draw_spec(self, t, s):
overlap = self.specfftsize/4
winfunc = scipy.blackman
self.sp_spec.clear()
self.sp_spec.specgram(s, self.specfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.specfftsize),
noverlap = overlap, xextent=[t.min(), t.max()])
def update_plots(self):
self.draw_time(self.time, self.iq)
self.draw_psd(self.freq, self.iq_psd)
if self.dospec:
self.draw_spec(self.time, self.iq)
self.xlim = scipy.array(self.sp_iq.get_xlim()) # so zoom doesn't get called
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
#xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(newxlim[0]))))
xmax = min(int(ceil(self.sample_rate*(newxlim[1]))), len(self.iq))
iq = scipy.array(self.iq[xmin : xmax])
time = scipy.array(self.time[xmin : xmax])
iq_psd, freq = self.dopsd(iq)
self.draw_psd(freq, iq_psd)
self.xlim = scipy.array(self.sp_iq.get_xlim())
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
r = self.get_data()
if(r):
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
r = self.get_data()
if(r):
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio binary file (with specified data type using --data-type) and displays the I&Q data versus time as well as the power spectral density (PSD) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples. Finally, the size of the FFT to use for the PSD and spectrogram plots can be set independently with --psd-size and --spec-size, respectively. The spectrogram plot does not display by default and is turned on with -S or --enable-spec."
parser = OptionParser(option_class=eng_option, conflict_handler="resolve",
usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=8192,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="eng_float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
parser.add_option("", "--psd-size", type="int", default=1024,
help="Set the size of the PSD FFT [default=%default]")
parser.add_option("", "--spec-size", type="int", default=256,
help="Set the size of the spectrogram FFT [default=%default]")
parser.add_option("-S", "--enable-spec", action="store_true", default=False,
help="Turn on plotting the spectrogram [default=%default]")
return parser
def find(item_in, list_search):
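    # membership test: list.index() raises ValueError when item_in is absent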
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
parser = plot_psd_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_psd_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
dopplershift/Scattering | scripts/dz-fig3.3.py | 1 | 1376 | import matplotlib.pyplot as plt
import numpy as np
import scattering
import scipy.constants as consts
d = np.linspace(0, 100, 500) * consts.milli
T = 0.0
wavelengths = np.array([10.0, 5.5, 3.21]) * consts.centi
lines = ['r--','b:','g-']
m_water = np.array([np.sqrt(80.255+24.313j), np.sqrt(65.476+37.026j),
np.sqrt(44.593+41.449j)])
m_ice = np.array([np.sqrt(3.16835+0.02492j), np.sqrt(3.16835+0.01068j),
np.sqrt(3.16835+0.0089j)])
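# the arrays above are complex refractive indices, i.e. sqrt of the complex
# relative permittivity of water and ice at each of the three wavelengths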
plt.figure()
for mw, mi, lam, line in zip(m_water, m_ice, wavelengths, lines):
scat = scattering.scatterer(lam, T, 'water', diameters=d, ref_index=mw)
scat.set_scattering_model('tmatrix')
plt.subplot(1,2,1)
plt.semilogy(d / consts.milli, scat.sigma_b / (consts.centi)**2, line,
label='%5.2fcm Tmat' % (lam / consts.centi))
scat = scattering.scatterer(lam, T, 'ice', diameters=d, ref_index=mi)
scat.set_scattering_model('tmatrix')
plt.subplot(1,2,2)
plt.semilogy(d / consts.milli, scat.sigma_b / (consts.centi)**2, line,
label='%5.2fcm Tmat' % (lam / consts.centi))
plt.subplot(1,2,1)
plt.xlabel('Diameter (mm)')
plt.ylabel(r'Backscatter Cross-Section (cm$^{2}$)')
plt.xlim(0,100.0)
plt.ylim(1.0e-2,1e3)
plt.subplot(1,2,2)
plt.xlabel('Diameter (mm)')
plt.ylabel(r'Backscatter Cross-Section (cm$^{2}$)')
plt.xlim(0,100.0)
plt.ylim(1.0e-2,1e3)
plt.legend(loc='lower right')
plt.show()
| bsd-2-clause |
bzero/statsmodels | setup.py | 7 | 15962 | """
Much of the build system code was adapted from work done by the pandas
developers [1], which was in turn based on work done in pyzmq [2] and lxml [3].
[1] http://pandas.pydata.org
[2] http://zeromq.github.io/pyzmq/
[3] http://lxml.de/
"""
import os
from os.path import relpath, join as pjoin
import sys
import subprocess
import re
from distutils.version import StrictVersion
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
no_frills = (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info', '--version',
'clean')))
# try bootstrapping setuptools if it doesn't exist
try:
import pkg_resources
try:
pkg_resources.require("setuptools>=0.6c5")
except pkg_resources.VersionConflict:
from ez_setup import use_setuptools
use_setuptools(version="0.6c5")
from setuptools import setup, Command, find_packages
_have_setuptools = True
except ImportError:
# no setuptools installed
from distutils.core import setup, Command
_have_setuptools = False
if _have_setuptools:
setuptools_kwargs = {"zip_safe": False,
"test_suite": "nose.collector"}
else:
setuptools_kwargs = {}
if sys.version_info[0] >= 3:
sys.exit("Need setuptools to install statsmodels for Python 3.x")
curdir = os.path.abspath(os.path.dirname(__file__))
README = open(pjoin(curdir, "README.rst")).read()
DISTNAME = 'statsmodels'
DESCRIPTION = 'Statistical computations and models for use with SciPy'
LONG_DESCRIPTION = README
MAINTAINER = 'Skipper Seabold, Josef Perktold'
MAINTAINER_EMAIL ='[email protected]'
URL = 'http://statsmodels.sourceforge.net/'
LICENSE = 'BSD License'
DOWNLOAD_URL = ''
# These imports need to be here; setuptools needs to be imported first.
from distutils.extension import Extension
from distutils.command.build import build
from distutils.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
not numpy_incl in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'statsmodels'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def strip_rc(version):
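    # drop a release-candidate suffix (e.g. "0.8.0rc1" -> "0.8.0") so that
    # StrictVersion can parse the version string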
return re.sub(r"rc\d+$", "", version)
def check_dependency_versions(min_versions):
"""
Don't let pip/setuptools do this all by itself. It's rude.
For all dependencies, try to import them and check if the versions of
installed dependencies match the minimum version requirements. If
installed but version too low, raise an error. If not installed at all,
return the correct ``setup_requires`` and ``install_requires`` arguments to
be added to the setuptools kwargs. This prevents upgrading installed
dependencies like numpy (that should be an explicit choice by the user and
never happen automatically), but make things work when installing into an
empty virtualenv for example.
"""
setup_requires = []
install_requires = []
try:
from numpy.version import short_version as npversion
except ImportError:
setup_requires.append('numpy')
install_requires.append('numpy')
else:
if not (StrictVersion(strip_rc(npversion)) >= min_versions['numpy']):
raise ImportError("Numpy version is %s. Requires >= %s" %
(npversion, min_versions['numpy']))
try:
import scipy
except ImportError:
install_requires.append('scipy')
else:
try:
from scipy.version import short_version as spversion
except ImportError:
from scipy.version import version as spversion # scipy 0.7.0
if not (StrictVersion(strip_rc(spversion)) >= min_versions['scipy']):
raise ImportError("Scipy version is %s. Requires >= %s" %
(spversion, min_versions['scipy']))
try:
from pandas.version import short_version as pversion
except ImportError:
install_requires.append('pandas')
else:
if not (StrictVersion(strip_rc(pversion)) >= min_versions['pandas']):
            raise ImportError("Pandas version is %s. Requires >= %s" %
                              (pversion, min_versions['pandas']))
try:
from patsy import __version__ as patsy_version
except ImportError:
install_requires.append('patsy')
else:
# patsy dev looks like 0.1.0+dev
pversion = re.match("\d*\.\d*\.\d*", patsy_version).group()
if not (StrictVersion(pversion) >= min_versions['patsy']):
raise ImportError("Patsy version is %s. Requires >= %s" %
(pversion, min_versions["patsy"]))
return setup_requires, install_requires
MAJ = 0
MIN = 8
REV = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJ,MIN,REV)
classifiers = [ 'Development Status :: 4 - Beta',
'Environment :: Console',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering']
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(" ".join(cmd), stdout = subprocess.PIPE, env=env,
shell=True).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def write_version_py(filename=pjoin(curdir, 'statsmodels/version.py')):
cnt = "\n".join(["",
"# THIS FILE IS GENERATED FROM SETUP.PY",
"short_version = '%(version)s'",
"version = '%(version)s'",
"full_version = '%(full_version)s'",
"git_revision = '%(git_revision)s'",
"release = %(isrelease)s", "",
"if not release:",
" version = full_version"])
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
dowrite = True
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists(filename):
# must be a source distribution, use existing version file
try:
from statsmodels.version import git_revision as GIT_REVISION
except ImportError:
dowrite = False
GIT_REVISION = "Unknown"
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
if dowrite:
try:
a = open(filename, 'w')
a.write(cnt % {'version': VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
self._clean_exclude = ["bspline_ext.c",
"bspline_impl.c"]
for root, dirs, files in list(os.walk('statsmodels')):
for f in files:
if f in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
self._clean_me.append(pjoin(root, f))
for d in dirs:
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
for d in ('build',):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except Exception:
pass
for clean_tree in self._clean_trees:
try:
import shutil
shutil.rmtree(clean_tree)
except Exception:
pass
class CheckingBuildExt(build_ext):
"""Subclass build_ext to get clearer report if Cython is necessary."""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
raise Exception("""Cython-generated file '%s' not found.
Cython is required to compile statsmodels from a development branch.
Please install Cython or download a source release of statsmodels.
""" % src)
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class DummyBuildSrc(Command):
""" numpy's build_src command interferes with Cython's build_ext.
"""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass = {'clean': CleanCommand,
'build': build}
cmdclass["build_src"] = DummyBuildSrc
cmdclass["build_ext"] = CheckingBuildExt
# some linux distros require it
#NOTE: we are not currently using this but add it to Extension, if needed.
# libraries = ['m'] if 'win32' not in sys.platform else []
from numpy.distutils.misc_util import get_info
npymath_info = get_info("npymath")
ext_data = dict(
kalman_loglike = {"name" : "statsmodels/tsa/kalmanf/kalman_loglike.c",
"depends" : ["statsmodels/src/capsule.h"],
"include_dirs": ["statsmodels/src"],
"sources" : []},
_statespace = {"name" : "statsmodels/tsa/statespace/_statespace.c",
"depends" : ["statsmodels/src/capsule.h"],
"include_dirs": ["statsmodels/src"] + npymath_info['include_dirs'],
"libraries": npymath_info['libraries'],
"library_dirs": npymath_info['library_dirs'],
"sources" : []},
linbin = {"name" : "statsmodels/nonparametric/linbin.c",
"depends" : [],
"sources" : []},
_smoothers_lowess = {"name" : "statsmodels/nonparametric/_smoothers_lowess.c",
"depends" : [],
"sources" : []}
)
extensions = []
for name, data in ext_data.items():
data['sources'] = data.get('sources', []) + [data['name']]
destdir = ".".join(os.path.dirname(data["name"]).split("/"))
data.pop('name')
obj = Extension('%s.%s' % (destdir, name), **data)
extensions.append(obj)
def get_data_files():
sep = os.path.sep
# install the datasets
data_files = {}
root = pjoin(curdir, "statsmodels", "datasets")
for i in os.listdir(root):
        if i == "tests":
continue
path = pjoin(root, i)
if os.path.isdir(path):
data_files.update({relpath(path, start=curdir).replace(sep, ".") : ["*.csv",
"*.dta"]})
# add all the tests and results files
for r, ds, fs in os.walk(pjoin(curdir, "statsmodels")):
r_ = relpath(r, start=curdir)
if r_.endswith('results'):
data_files.update({r_.replace(sep, ".") : ["*.csv",
"*.txt"]})
return data_files
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.unlink('MANIFEST')
min_versions = {
'numpy' : '1.4.0',
'scipy' : '0.7.0',
'pandas' : '0.7.1',
'patsy' : '0.1.0',
}
if sys.version_info[0] == 3 and sys.version_info[1] >= 3:
# 3.3 needs numpy 1.7+
min_versions.update({"numpy" : "1.7.0b2"})
(setup_requires,
install_requires) = check_dependency_versions(min_versions)
if _have_setuptools:
setuptools_kwargs['setup_requires'] = setup_requires
setuptools_kwargs['install_requires'] = install_requires
write_version_py()
# this adds *.csv and *.dta files in datasets folders
# and *.csv and *.txt files in test/results folders
package_data = get_data_files()
packages = find_packages()
packages.append("statsmodels.tsa.vector_ar.data")
package_data["statsmodels.datasets.tests"].append("*.zip")
package_data["statsmodels.iolib.tests.results"].append("*.dta")
package_data["statsmodels.stats.tests.results"].append("*.json")
package_data["statsmodels.tsa.vector_ar.tests.results"].append("*.npz")
# data files that don't follow the tests/results pattern. should fix.
package_data.update({"statsmodels.stats.tests" : ["*.txt"]})
package_data.update({"statsmodels.stats.libqsturng" :
["*.r", "*.txt", "*.dat"]})
package_data.update({"statsmodels.stats.libqsturng.tests" :
["*.csv", "*.dat"]})
package_data.update({"statsmodels.tsa.vector_ar.data" : ["*.dat"]})
package_data.update({"statsmodels.tsa.vector_ar.data" : ["*.dat"]})
# temporary, until moved:
package_data.update({"statsmodels.sandbox.regression.tests" :
["*.dta", "*.csv"]})
#TODO: deal with this. Not sure if it ever worked for bdists
#('docs/build/htmlhelp/statsmodelsdoc.chm',
# 'statsmodels/statsmodelsdoc.chm')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')) and not no_frills:
# Generate Cython sources, unless building from source release
generate_cython()
setup(name = DISTNAME,
version = VERSION,
maintainer = MAINTAINER,
ext_modules = extensions,
maintainer_email = MAINTAINER_EMAIL,
description = DESCRIPTION,
license = LICENSE,
url = URL,
download_url = DOWNLOAD_URL,
long_description = LONG_DESCRIPTION,
classifiers = classifiers,
platforms = 'any',
cmdclass = cmdclass,
packages = packages,
package_data = package_data,
include_package_data=False, # True will install all files in repo
**setuptools_kwargs)
| bsd-3-clause |
dwillmer/blaze | blaze/compute/tests/test_spark.py | 2 | 7666 | from __future__ import absolute_import, division, print_function
import pytest
pyspark = pytest.importorskip('pyspark')
import pandas as pd
from blaze import compute, symbol, summary, exp, by, join, merge
from toolz import identity
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
data2 = [['Alice', 'Austin'],
['Bob', 'Boston']]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
# this only exists because we need to have a single session scoped spark
# context, otherwise these would simply be global variables
@pytest.fixture
def rdd(sc):
return sc.parallelize(data)
@pytest.fixture
def rdd2(sc):
return sc.parallelize(data2)
t = symbol('t', 'var * {name: string, amount: int, id: int}')
t2 = symbol('t2', 'var * {name: string, city: string}')
# Web Commons Graph Example data
data_idx = [['A', 1],
['B', 2],
['C', 3]]
data_arc = [[1, 3],
[2, 3],
[3, 1]]
t_idx = symbol('idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('arc', 'var * {node_out: int32, node_id: int32}')
def test_spark_symbol(rdd):
assert compute(t, rdd) == rdd
def test_spark_projection(rdd):
assert compute(t['name'], rdd).collect() == [row[0] for row in data]
def test_spark_multicols_projection(rdd):
result = compute(t[['amount', 'name']], rdd).collect()
expected = [(100, 'Alice'), (200, 'Bob'), (50, 'Alice')]
print(result)
print(expected)
assert result == expected
inc = lambda x: x + 1
reduction_exprs = [
t['amount'].sum(),
t['amount'].min(),
t['amount'].max(),
t['amount'].nunique(),
t['name'].nunique(),
t['amount'].count(),
(t['amount'] > 150).any(),
(t['amount'] > 150).all(),
t['amount'].mean(),
t['amount'].var(),
summary(a=t.amount.sum(), b=t.id.count()),
t['amount'].std()]
def test_spark_reductions(rdd):
for expr in reduction_exprs:
result = compute(expr, rdd)
expected = compute(expr, data)
if not result == expected:
print(result)
print(expected)
if isinstance(result, float):
assert abs(result - expected) < 0.001
else:
assert result == expected
exprs = [
t['amount'],
t['amount'] == 100,
t['amount'].truncate(150),
t[t['name'] == 'Alice'],
t[t['amount'] == 0],
t[t['amount'] > 150],
t['amount'] + t['id'],
t['amount'] % t['id'],
exp(t['amount']),
by(t['name'], total=t['amount'].sum()),
by(t['name'], total=(t['amount'] + 1).sum()),
(t['amount'] * 1).label('foo'),
t.map(lambda tup: tup[1] + tup[2], 'real'),
t.like(name='Alice'),
t['amount'].apply(identity, 'var * real', splittable=True),
t['amount'].map(inc, 'int')]
def test_spark_basic(rdd):
check_exprs_against_python(exprs, data, rdd)
def check_exprs_against_python(exprs, data, rdd):
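    """Compare each expression computed on the Spark RDD against the plain
    Python backend and report any mismatches."""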
any_bad = False
for expr in exprs:
result = compute(expr, rdd).collect()
expected = list(compute(expr, data))
if not result == expected:
any_bad = True
print("Expression:", expr)
print("Spark:", result)
print("Python:", expected)
assert not any_bad
def test_spark_big_by(sc):
tbig = symbol(
'tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
big_exprs = [
by(tbig[['name', 'sex']], total=tbig['amount'].sum()),
by(tbig[['name', 'sex']], total=(tbig['id'] + tbig['amount']).sum())]
databig = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
rddbig = sc.parallelize(databig)
check_exprs_against_python(big_exprs, databig, rddbig)
def test_head(rdd):
assert list(compute(t.head(1), rdd)) == list(compute(t.head(1), data))
def test_sort(rdd):
check_exprs_against_python([
t.sort('amount'),
t.sort('amount', ascending=True),
t.sort(t['amount'], ascending=True),
t.sort(-t['amount'].label('foo') + 1, ascending=True),
t.sort(['amount', 'id'])], data, rdd)
def test_distinct(rdd):
assert set(compute(t['name'].distinct(), rdd).collect()) == \
set(['Alice', 'Bob'])
def test_join(rdd, rdd2):
joined = join(t, t2, 'name')
expected = [('Alice', 100, 1, 'Austin'),
('Bob', 200, 2, 'Boston'),
('Alice', 50, 3, 'Austin')]
result = compute(joined, {t: rdd, t2: rdd2}).collect()
assert all(i in expected for i in result)
def test_multi_column_join(sc):
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
rleft = sc.parallelize(left)
rright = sc.parallelize(right)
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {x: int, y: int, w: int}')
j = join(L, R, ['x', 'y'])
result = compute(j, {L: rleft, R: rright})
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
assert set(result.collect()) == set(expected)
def test_groupby(sc):
rddidx = sc.parallelize(data_idx)
rddarc = sc.parallelize(data_arc)
joined = join(t_arc, t_idx, "node_id")
t = by(joined['name'], count=joined['node_id'].count())
a = compute(t, {t_arc: rddarc, t_idx: rddidx})
in_degree = dict(a.collect())
assert in_degree == {'A': 1, 'C': 2}
def test_multi_level_rowfunc_works(rdd):
expr = t['amount'].map(lambda x: x + 1, 'int')
assert compute(expr, rdd).collect() == [x[1] + 1 for x in data]
def test_merge(rdd):
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert compute(expr, rdd).collect() == [
(row[0], row[1] * 2) for row in data]
def test_selection_out_of_order(rdd):
expr = t['name'][t['amount'] < 100]
assert compute(expr, rdd).collect() == ['Alice']
def test_recursive_rowfunc_is_used(rdd):
expr = by(t['name'], total=(2 * (t['amount'] + t['id'])).sum())
expected = [('Alice', 2 * (101 + 53)),
('Bob', 2 * (202))]
assert set(compute(expr, rdd).collect()) == set(expected)
def test_outer_join(sc):
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = sc.parallelize(left)
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = sc.parallelize(right)
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
assert set(compute(join(L, R), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='left'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='right'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
    # Full outer join
assert set(compute(join(L, R, how='outer'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
| bsd-3-clause |
LouisePaulDelvaux/openfisca-france-data | openfisca_france_data/input_data_builders/build_openfisca_survey_data/step_04_famille.py | 1 | 25795 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from pandas import concat, DataFrame
from openfisca_france_data.temporary import TemporaryStore
from openfisca_france_data.input_data_builders.build_openfisca_survey_data.utils import assert_dtype
log = logging.getLogger(__name__)
# Retreives the families
# Creates 'idfam' and 'quifam' variables
def control_04(dataframe, base):
log.info(u"longueur de la dataframe après opération :".format(len(dataframe.index)))
log.info(u"contrôle des doublons : il y a {} individus en double".format(any(dataframe.duplicated(cols='noindiv'))))
log.info(u"contrôle des colonnes : il y a {} colonnes".format(len(dataframe.columns)))
log.info(u"Il y a {} de familles différentes".format(len(set(dataframe.noifam.values))))
log.info(u"contrôle: {} noifam are NaN:".format(len(dataframe[dataframe['noifam'].isnull()])))
log.info(u"{} lignes dans dataframe vs {} lignes dans base".format(len(dataframe.index), len(base.index)))
assert len(dataframe.index) <= len(base.index), u"dataframe has too many rows compared to base"
def subset_base(base, famille):
"""
Generates a dataframe containing the values of base that are not already in famille
"""
return base[~(base.noindiv.isin(famille.noindiv.values))].copy()
def famille(year = 2006):
temporary_store = TemporaryStore.create(file_name = "erfs")
log.info('step_04_famille: construction de la table famille')
# On suit la méthode décrite dans le Guide ERF_2002_rétropolée page 135
# TODO: extraire ces valeurs d'un fichier de paramètres de législation
if year == 2006:
smic = 1254
elif year == 2007:
smic = 1280
elif year == 2008:
smic = 1308
elif year == 2009:
smic = 1337
else:
log.info("smic non défini")
# TODO check if we can remove acteu forter etc since dealt with in 01_pre_proc
log.info('Etape 1 : préparation de base')
log.info(' 1.1 : récupération de indivi')
indivi = temporary_store['indivim_{}'.format(year)]
indivi['year'] = year
indivi["noidec"] = indivi["declar1"].str[0:2].copy() # Not converted to int because some NaN are present
indivi["agepf"] = (
(indivi.naim < 7) * (indivi.year - indivi.naia)
+ (indivi.naim >= 7) * (indivi.year - indivi.naia - 1)
).astype(object) # TODO: naia has some NaN but naim do not and then should be an int
indivi = indivi[~(
(indivi.lien == 6) & (indivi.agepf < 16) & (indivi.quelfic == "EE")
)].copy()
assert_dtype(indivi.year, "int64")
for series_name in ['agepf', 'noidec']: # integer with NaN
assert_dtype(indivi[series_name], "object")
log.info(' 1.2 : récupération des enfants à naître')
individual_variables = [
'acteu',
'actrec',
'agepf',
'agepr',
'cohab',
'contra',
'declar1',
'forter',
'ident',
'lien',
'lpr',
'mrec',
'naia',
'naim',
'noi',
'noicon',
'noidec',
'noimer',
'noindiv',
'noiper',
'persfip',
'quelfic',
'retrai',
'rga',
'rstg',
'sexe',
'stc',
'titc',
'year',
'ztsai',
]
enfants_a_naitre = temporary_store['enfants_a_naitre_{}'.format(year)][individual_variables].copy()
enfants_a_naitre.drop_duplicates('noindiv', inplace = True)
log.info(u""""
Il y a {} enfants à naitre avant de retirer ceux qui ne sont pas enfants
de la personne de référence
""".format(len(enfants_a_naitre.index)))
enfants_a_naitre = enfants_a_naitre[enfants_a_naitre.lpr == 3].copy()
enfants_a_naitre = enfants_a_naitre[~(enfants_a_naitre.noindiv.isin(indivi.noindiv.values))].copy()
log.info(u""""
Il y a {} enfants à naitre après avoir retiré ceux qui ne sont pas enfants
de la personne de référence
""".format(len(enfants_a_naitre.index)))
# PB with vars "agepf" "noidec" "year" NOTE: quels problèmes ? JS
log.info(u" 1.3 : création de la base complète")
base = concat([indivi, enfants_a_naitre])
log.info(u"base contient {} lignes ".format(len(base.index)))
base['noindiv'] = (100 * base.ident + base['noi']).astype(int)
base['m15'] = base.agepf < 16
base['p16m20'] = (base.agepf >= 16) & (base.agepf <= 20)
base['p21'] = base.agepf >= 21
base['ztsai'].fillna(0, inplace = True)
base['smic55'] = base['ztsai'] >= (smic * 12 * 0.55) # 55% du smic mensuel brut
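    # Illustrative check of the threshold above (values assumed, not taken from the data):
    # for year == 2006 the cut-off is 1254 * 12 * 0.55 = 8276.4, so an individual with
    # ztsai = 9000 gets smic55 = True and one with ztsai = 5000 gets smic55 = False.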
base['famille'] = 0
base['kid'] = False
for series_name in ['kid', 'm15', 'p16m20', 'p21', 'smic55']:
assert_dtype(base[series_name], "bool")
assert_dtype(base.famille, "int")
# TODO: remove or clean from NA assert_dtype(base.ztsai, "int")
log.info(u"Etape 2 : On cherche les enfants ayant père et/ou mère")
personne_de_reference = base[['ident', 'noi']][base.lpr == 1].copy()
personne_de_reference['noifam'] = (100 * personne_de_reference.ident + personne_de_reference['noi']).astype(int)
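    # L'identifiant de famille combine l'identifiant du ménage et le numéro d'individu ;
    # par exemple (valeurs purement illustratives), ident = 1203 et noi = 1 donnent
    # noifam = 100 * 1203 + 1 = 120301.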
personne_de_reference = personne_de_reference[['ident', 'noifam']].copy()
log.info(u"length personne_de_reference : {}".format(len(personne_de_reference.index)))
nof01 = base[(base.lpr.isin([1, 2])) | ((base.lpr == 3) & (base.m15)) |
((base.lpr == 3) & (base.p16m20) & (~base.smic55))].copy()
log.info('longueur de nof01 avant merge : {}'.format(len(nof01.index)))
nof01 = nof01.merge(personne_de_reference, on='ident', how='outer')
nof01['famille'] = 10
nof01['kid'] = (
(nof01.lpr == 3) & (nof01.m15)
) | (
(nof01.lpr == 3) & (nof01.p16m20) & ~(nof01.smic55)
)
for series_name in ['famille', 'noifam']:
assert_dtype(nof01[series_name], "int")
assert_dtype(nof01.kid, "bool")
famille = nof01.copy()
del nof01
control_04(famille, base)
log.info(u" 2.1 : identification des couples")
# l'ID est le noi de l'homme
hcouple = subset_base(base, famille)
hcouple = hcouple[(hcouple.cohab == 1) & (hcouple.lpr >= 3) & (hcouple.sexe == 1)].copy()
hcouple['noifam'] = (100 * hcouple.ident + hcouple.noi).astype(int)
hcouple['famille'] = 21
for series_name in ['famille', 'noifam']:
assert_dtype(hcouple[series_name], "int")
log.info(u"longueur hcouple : ".format(len(hcouple.index)))
log.info(u" 2.2 : attributing the noifam to the wives")
fcouple = base[~(base.noindiv.isin(famille.noindiv.values))].copy()
fcouple = fcouple[(fcouple.cohab == 1) & (fcouple.lpr >= 3) & (fcouple.sexe == 2)].copy()
# l'identifiant de la famille est celui du conjoint de la personne de référence du ménage
fcouple['noifam'] = (100 * fcouple.ident + fcouple.noicon).astype(int)
fcouple['famille'] = 22
for series_name in ['famille', 'noifam']:
assert_dtype(fcouple[series_name], "int")
log.info(u"Il y a {} enfants avec parents en fcouple".format(len(fcouple.index)))
famcom = fcouple.merge(hcouple, on='noifam', how='outer')
log.info(u"longueur fancom après fusion : {}".format(len(famcom.index)))
fcouple = fcouple.merge(famcom) # TODO : check s'il ne faut pas faire un inner merge sinon présence de doublons
log.info(u"longueur fcouple après fusion : {}".format(len(fcouple.index)))
famille = concat([famille, hcouple, fcouple], join='inner')
control_04(famille, base)
log.info(u"Etape 3: Récupération des personnes seules")
log.info(u" 3.1 : personnes seules de catégorie 1")
seul1 = base[~(base.noindiv.isin(famille.noindiv.values))].copy()
seul1 = seul1[(seul1.lpr.isin([3, 4])) & ((seul1.p16m20 & seul1.smic55) | seul1.p21) & (seul1.cohab == 1) &
(seul1.sexe == 2)].copy()
if len(seul1.index) > 0:
seul1['noifam'] = (100 * seul1.ident + seul1.noi).astype(int)
seul1['famille'] = 31
for series_name in ['famille', 'noifam']:
assert_dtype(seul1[series_name], "int")
famille = concat([famille, seul1])
control_04(famille, base)
log.info(u" 3.1 personnes seules de catégorie 2")
seul2 = base[~(base.noindiv.isin(famille.noindiv.values))].copy()
seul2 = seul2[(seul2.lpr.isin([3, 4])) & seul2.p16m20 & seul2.smic55 & (seul2.cohab != 1)].copy()
seul2['noifam'] = (100 * seul2.ident + seul2.noi).astype(int)
seul2['famille'] = 32
for series_name in ['famille', 'noifam']:
assert_dtype(seul2[series_name], "int")
famille = concat([famille, seul2])
control_04(famille, base)
log.info(u" 3.3 personnes seules de catégorie 3")
seul3 = subset_base(base, famille)
seul3 = seul3[(seul3.lpr.isin([3, 4])) & seul3.p21 & (seul3.cohab != 1)].copy()
# TODO: CHECK erreur dans le guide méthodologique ERF 2002 lpr 3,4 au lieu de 3 seulement
seul3['noifam'] = (100 * seul3.ident + seul3.noi).astype(int)
seul3['famille'] = 33
for series_name in ['famille', 'noifam']:
assert_dtype(seul3[series_name], "int")
famille = concat([famille, seul3])
control_04(famille, base)
log.info(u" 3.4 : personnes seules de catégorie 4")
seul4 = subset_base(base, famille)
seul4 = seul4[(seul4.lpr == 4) & seul4.p16m20 & ~(seul4.smic55) & (seul4.noimer.isnull()) &
(seul4.persfip == 'vous')].copy()
if len(seul4.index) > 0:
seul4['noifam'] = (100 * seul4.ident + seul4.noi).astype(int)
seul4['famille'] = 34
famille = concat([famille, seul4])
for series_name in ['famille', 'noifam']:
assert_dtype(seul4[series_name], "int")
control_04(famille, base)
log.info(u"Etape 4 : traitement des enfants")
log.info(u" 4.1 : enfant avec mère")
avec_mere = subset_base(base, famille)
avec_mere = avec_mere[((avec_mere.lpr == 4) & ((avec_mere.p16m20 == 1) | (avec_mere.m15 == 1)) &
(avec_mere.noimer.notnull()))].copy()
avec_mere['noifam'] = (100 * avec_mere.ident + avec_mere.noimer).astype(int)
avec_mere['famille'] = 41
avec_mere['kid'] = True
for series_name in ['famille', 'noifam']:
assert_dtype(avec_mere[series_name], "int")
assert_dtype(avec_mere.kid, "bool")
# On récupère les mères des enfants
mereid = DataFrame(avec_mere['noifam'].copy()) # Keep a DataFrame instead of a Series to deal with rename and merge
# Ces mères peuvent avoir plusieurs enfants, or il faut unicité de l'identifiant
mereid.rename(columns = {'noifam': 'noindiv'}, inplace = True)
mereid.drop_duplicates(inplace = True)
mere = mereid.merge(base)
mere['noifam'] = (100 * mere.ident + mere.noi).astype(int)
mere['famille'] = 42
for series_name in ['famille', 'noifam']:
assert_dtype(mere[series_name], "int")
avec_mere = avec_mere[avec_mere.noifam.isin(mereid.noindiv.values)].copy()
log.info(u"Contrôle de famille après ajout des pères")
control_04(mere, base)
famille = famille[~(famille.noindiv.isin(mere.noindiv.values))].copy()
control_04(famille, base)
# on retrouve les conjoints des mères
conj_mereid = mere[['ident', 'noicon', 'noifam']].copy()[mere.noicon.notnull()].copy()
    conj_mereid['noindiv'] = (100 * conj_mereid.ident + conj_mereid.noicon).astype(int)
    assert_dtype(conj_mereid.noindiv, "int")
conj_mereid = conj_mereid[['noindiv', 'noifam']].copy()
conj_mereid = conj_mereid.merge(base)
control_04(conj_mereid, base)
conj_mere = conj_mereid.merge(base)
conj_mere['famille'] = 43
for series_name in ['famille', 'noifam']:
        assert_dtype(conj_mere[series_name], "int")
famille = famille[~(famille.noindiv.isin(conj_mere.noindiv.values))].copy()
famille = concat([famille, avec_mere, mere, conj_mere])
control_04(famille, base)
del avec_mere, mere, conj_mere, mereid, conj_mereid
log.info(u" 4.2 : enfants avec père")
avec_pere = subset_base(base, famille)
avec_pere = avec_pere[(avec_pere.lpr == 4) &
((avec_pere.p16m20 == 1) | (avec_pere.m15 == 1)) &
(avec_pere.noiper.notnull())]
avec_pere['noifam'] = (100 * avec_pere.ident + avec_pere.noiper).astype(int)
avec_pere['famille'] = 44
avec_pere['kid'] = True
# TODO: hack to deal with the problem of presence of NaN in avec_pere
# avec_pere.dropna(subset = ['noifam'], how = 'all', inplace = True)
assert avec_pere['noifam'].notnull().all(), 'presence of NaN in avec_pere'
for series_name in ['famille', 'noifam']:
assert_dtype(avec_pere[series_name], "int")
assert_dtype(avec_pere.kid, "bool")
pereid = DataFrame(avec_pere['noifam']) # Keep a DataFrame instead of a Series to deal with rename and merge
pereid.rename(columns = {'noifam': 'noindiv'}, inplace = True)
pereid.drop_duplicates(inplace = True)
pere = pereid.merge(base)
pere['noifam'] = (100 * pere.ident + pere.noi).astype(int)
pere['famille'] = 45
famille = famille[~(famille.noindiv.isin(pere.noindiv.values))].copy()
# On récupère les conjoints des pères
conj_pereid = pere[['ident', 'noicon', 'noifam']].copy()[pere.noicon.notnull()].copy()
conj_pereid['noindiv'] = (100 * conj_pereid.ident + conj_pereid.noicon).astype(int)
conj_pereid = conj_pereid[['noindiv', 'noifam']].copy()
conj_pere = conj_pereid.merge(base)
control_04(conj_pere, base)
if len(conj_pere.index) > 0:
conj_pere['famille'] = 46
for series_name in ['famille', 'noifam']:
assert_dtype(conj_pere[series_name], "int")
famille = famille[~(famille.noindiv.isin(conj_pere.noindiv.values))].copy()
famille = concat([famille, avec_pere, pere, conj_pere])
log.info(u"Contrôle de famille après ajout des pères")
control_04(famille, base)
del avec_pere, pere, pereid, conj_pere, conj_pereid
log.info(u" 4.3 : enfants avec déclarant")
avec_dec = subset_base(base, famille)
avec_dec = avec_dec[
(avec_dec.persfip == "pac") &
(avec_dec.lpr == 4) &
(
(avec_dec.p16m20 & ~(avec_dec.smic55)) | (avec_dec.m15 == 1)
)
]
avec_dec['noifam'] = (100 * avec_dec.ident + avec_dec.noidec.astype('int')).astype('int')
avec_dec['famille'] = 47
avec_dec['kid'] = True
for series_name in ['famille', 'noifam']:
assert_dtype(avec_dec[series_name], "int")
assert_dtype(avec_dec.kid, "bool")
control_04(avec_dec, base)
# on récupère les déclarants pour leur attribuer une famille propre
declarant_id = DataFrame(avec_dec['noifam'].copy()).rename(columns={'noifam': 'noindiv'})
declarant_id.drop_duplicates(inplace = True)
dec = declarant_id.merge(base)
dec['noifam'] = (100 * dec.ident + dec.noi).astype(int)
dec['famille'] = 48
for series_name in ['famille', 'noifam']:
assert_dtype(dec[series_name], "int")
famille = famille[~(famille.noindiv.isin(dec.noindiv.values))].copy()
famille = concat([famille, avec_dec, dec])
del dec, declarant_id, avec_dec
control_04(famille, base)
log.info(u"Etape 5 : Récupération des enfants fip")
log.info(u" 5.1 : Création de la df fip")
individual_variables_fip = [
'acteu',
'actrec',
'agepf',
'agepr',
'cohab',
'contra',
'declar1',
'forter',
'ident',
'lien',
'lpr',
'mrec',
'naia',
'naim',
'noi',
'noicon',
'noidec',
'noimer',
'noindiv',
'noiper',
'persfip',
'quelfic',
'retrai',
'rga',
'rstg',
'sexe',
'stc',
'titc',
'year',
'ztsai',
]
fip = temporary_store['fipDat_{}'.format(year)][individual_variables_fip].copy()
    # Variables auxiliaires présentes dans base qu'il faut rajouter aux fip
    # WARNING les noindiv des fip sont construits sur les ident des déclarants
    # pas d'overlap possible avec les autres noindiv car on a des noi = 99, 98, 97, ...
fip['m15'] = (fip.agepf < 16)
fip['p16m20'] = ((fip.agepf >= 16) & (fip.agepf <= 20))
fip['p21'] = (fip.agepf >= 21)
fip['smic55'] = (fip.ztsai >= smic * 12 * 0.55)
fip['famille'] = 0
fip['kid'] = False
for series_name in ['kid', 'm15', 'p16m20', 'p21', 'smic55']:
assert_dtype(fip[series_name], "bool")
for series_name in ['famille']:
assert_dtype(fip[series_name], "int")
# # base <- rbind(base,fip)
# # table(base$quelfic)
# # enfant_fip <- base[(!base$noindiv %in% famille$noindiv),]
# # enfant_fip <- subset(enfant_fip, (quelfic=="FIP") & (( (agepf %in% c(19,20)) & !smic55 ) | (naia==year & rga=='6')) ) # TODO check year ou year-1 !
# # enfant_fip <- within(enfant_fip,{
# # noifam=100*ident+noidec
# # famille=50
# # kid=TRUE})
# # # ident=NA}) # TODO : je ne sais pas quoi mettre un NA fausse les manips suivantes
# # famille <- rbind(famille,enfant_fip)
# #
# # # TODO: En 2006 on peut faire ce qui suit car tous les parents fip sont déjà dans une famille
# # parent_fip <- famille[famille$noindiv %in% enfant_fip$noifam,]
# # any(enfant_fip$noifam %in% parent_fip$noindiv)
# # parent_fip <- within(parent_fip,{
# # noifam <- noindiv
# # famille <- 51
# # kid <- FALSE})
# # famille[famille$noindiv %in% enfant_fip$noifam,] <- parent_fip
# # # TODO quid du conjoint ?
log.info(u" 5.2 : extension de base avec les fip")
base_ = concat([base, fip])
enfant_fip = subset_base(base_, famille)
enfant_fip = enfant_fip[
(enfant_fip.quelfic == "FIP") & (
(enfant_fip.agepf.isin([19, 20]) & ~(enfant_fip.smic55)) |
((enfant_fip.naia == enfant_fip.year - 1) & (enfant_fip.rga.astype('int') == 6))
)
].copy()
enfant_fip['noifam'] = (100 * enfant_fip.ident + enfant_fip.noidec).astype(int)
enfant_fip['famille'] = 50
enfant_fip['kid'] = True
enfant_fip['ident'] = None # TODO: should we really do this ?
assert_dtype(enfant_fip.kid, "bool")
for series_name in ['famille', 'noifam']:
assert_dtype(enfant_fip[series_name], "int")
control_04(enfant_fip, base)
famille = concat([famille, enfant_fip])
base = concat([base, enfant_fip])
parent_fip = famille[famille.noindiv.isin(enfant_fip.noifam.values)].copy()
assert (enfant_fip.noifam.isin(parent_fip.noindiv.values)).any(), \
"{} doublons entre enfant_fip et parent fip !".format((enfant_fip.noifam.isin(parent_fip.noindiv.values)).sum())
parent_fip['noifam'] = parent_fip['noindiv'].values.copy()
parent_fip['famille'] = 51
parent_fip['kid'] = False
log.info(u"Contrôle de parent_fip")
control_04(parent_fip, base)
control_04(famille, base)
famille = famille.merge(parent_fip, how='outer')
# duplicated_individuals = famille.noindiv.duplicated()
# TODO: How to prevent failing in the next assert and avoiding droppping duplicates ?
# assert not duplicated_individuals.any(), "{} duplicated individuals in famille".format(
# duplicated_individuals.sum())
famille = famille.drop_duplicates(subset = 'noindiv', take_last = True)
control_04(famille, base)
del enfant_fip, fip, parent_fip
# # message('Etape 6 : non attribué')
# # non_attribue1 <- base[(!base$noindiv %in% famille$noindiv),]
# # non_attribue1 <- subset(non_attribue1,
# # (quelfic!="FIP") & (m15 | (p16m20&(lien %in% c(1,2,3,4) & agepr>=35)))
# # )
# # # On rattache les moins de 15 ans avec la PR (on a déjà éliminé les enfants en nourrice)
# # non_attribue1 <- merge(pr,non_attribue1)
# # non_attribue1 <- within(non_attribue1,{
# # famille <- ifelse(m15,61,62)
# # kid <- TRUE })
# #
# # rm(pr)
# # famille <- rbind(famille,non_attribue1)
# # dup <- duplicated(famille$noindiv)
# # table(dup)
# # rm(non_attribue1)
# # table(famille$famille, useNA="ifany")
# #
# # non_attribue2 <- base[(!base$noindiv %in% famille$noindiv) & (base$quelfic!="FIP"),]
# # non_attribue2 <- within(non_attribue2,{
# # noifam <- 100*ident+noi # l'identifiant est celui du jeune */
# # kid<-FALSE
# # famille<-63})
# #
# # famille <- rbind(famille,non_attribue2)
log.info(u"Etape 6 : gestion des non attribués")
log.info(u" 6.1 : non attribués type 1")
non_attribue1 = subset_base(base, famille)
non_attribue1 = non_attribue1[
        (non_attribue1.quelfic != 'FIP') & (
non_attribue1.m15 | (
non_attribue1.p16m20 & (non_attribue1.lien.isin(range(1, 5))) & (non_attribue1.agepr >= 35)
)
)
].copy()
# On rattache les moins de 15 ans avec la PR (on a déjà éliminé les enfants en nourrice)
non_attribue1 = non_attribue1.merge(personne_de_reference)
control_04(non_attribue1, base)
non_attribue1['famille'] = 61 * non_attribue1.m15 + 62 * ~(non_attribue1.m15)
non_attribue1['kid'] = True
assert_dtype(non_attribue1.kid, "bool")
assert_dtype(non_attribue1.famille, "int")
famille = concat([famille, non_attribue1])
control_04(famille, base)
del personne_de_reference, non_attribue1
log.info(u" 6.2 : non attribué type 2")
non_attribue2 = base[(~(base.noindiv.isin(famille.noindiv.values)) & (base.quelfic != "FIP"))].copy()
non_attribue2['noifam'] = (100 * non_attribue2.ident + non_attribue2.noi).astype(int)
non_attribue2['kid'] = False
non_attribue2['famille'] = 63
assert_dtype(non_attribue2.kid, "bool")
for series_name in ['famille', 'noifam']:
assert_dtype(non_attribue2[series_name], "int")
famille = concat([famille, non_attribue2], join='inner')
control_04(famille, base)
del non_attribue2
# Sauvegarde de la table famille
log.info(u"Etape 7 : Sauvegarde de la table famille")
log.info(u" 7.1 : Mise en forme finale")
# TODO: nettoyer les champs qui ne servent plus à rien
# famille['idec'] = famille['declar1'].str[3:11]
# famille['idec'].apply(lambda x: str(x)+'-')
# famille['idec'] += famille['declar1'].str[0:2]
famille['chef'] = (famille['noifam'] == (100 * famille.ident + famille.noi))
assert_dtype(famille.chef, "bool")
famille.reset_index(inplace = True)
control_04(famille, base)
log.info(u" 7.2 : création de la colonne rang")
famille['rang'] = famille.kid.astype('int')
while any(famille[(famille.rang != 0)].duplicated(subset = ['rang', 'noifam'])):
famille["rang"][famille.rang != 0] += famille[famille.rang != 0].copy().duplicated(
cols = ["rang", 'noifam']).values
log.info(u"nb de rangs différents : {}".format(len(set(famille.rang.values))))
log.info(u" 7.3 : création de la colonne quifam et troncature")
log.info(u"value_counts chef : \n {}".format(famille['chef'].value_counts()))
log.info(u"value_counts kid :' \n {}".format(famille['kid'].value_counts()))
famille['quifam'] = -1
# famille['quifam'] = famille['quifam'].where(famille['chef'].values, 0)
# ATTENTTION : ^ stands for XOR
famille.quifam = (0 +
((~famille['chef']) & (~famille['kid'])).astype(int) +
famille.kid * famille.rang
).astype('int')
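    # Resulting coding of quifam (as computed above): 0 for the family head (chef), 1 for an
    # adult who is neither head nor child, and rang (1, 2, ...) for children; duplicated
    # (idfam, quifam) pairs are dropped a few lines further down.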
# TODO: Test a groupby to improve the following this (should be placed )
# assert famille['chef'].sum() == len(famille.noifam.unique()), \
# 'The number of family chiefs {} is different from the number of families {}'.format(
# famille['chef'].sum(),
# len(famille.idfam.unique())
# )
# famille['noifam'] = famille['noifam'].astype('int')
log.info(u"value_counts quifam : \n {}".format(famille['quifam'].value_counts()))
famille = famille[['noindiv', 'quifam', 'noifam']].copy()
famille.rename(columns = {'noifam': 'idfam'}, inplace = True)
log.info(u"Vérifications sur famille")
# TODO: we drop duplicates if any
log.info(u"There are {} duplicates of quifam inside famille, we drop them".format(
famille.duplicated(subset = ['idfam', 'quifam']).sum())
)
famille.drop_duplicates(subset = ['idfam', 'quifam'], inplace = True)
# assert not(famille.duplicated(cols=['idfam', 'quifam']).any()), \
# 'There are {} duplicates of quifam inside famille'.format(
# famille.duplicated(cols=['idfam', 'quifam']).sum())
temporary_store["famc_{}".format(year)] = famille
del indivi, enfants_a_naitre
if __name__ == '__main__':
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
famille()
log.info(u"étape 04 famille terminée")
| agpl-3.0 |
jonathanstrong/NAB | nab/detectors/dummy/algorithms.py | 6 | 9416 |
"""
This is no man's land. Do anything you want in here,
as long as you return a boolean that determines whether the input
timeseries is anomalous or not.
To add an algorithm, define it here, and add its name to settings.ALGORITHMS.
"""
import pandas
import numpy as np
import traceback
from time import time
from datetime import datetime, timedelta
def tail_avg(timeseries):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except IndexError:
return timeseries[-1][1]
def median_absolute_deviation(timeseries):
"""
A timeseries is anomalous if the deviation of its latest datapoint with
respect to the median is X times larger than the median of deviations.
"""
series = pandas.Series([x[1] for x in timeseries])
median = series.median()
demedianed = np.abs(series - median)
median_deviation = demedianed.median()
# The test statistic is infinite when the median is zero,
# so it becomes super sensitive. We play it safe and skip when this happens.
if median_deviation == 0:
return False
test_statistic = demedianed.iget(-1) / median_deviation
  # Completely arbitrary...triggers if the median deviation is
# 6 times bigger than the median
if test_statistic > 6:
return True
else:
return False
# def grubbs(timeseries):
# """
# A timeseries is anomalous if the Z score is greater than the Grubb's
# score.
# """
# series = np.array([x[1] for x in timeseries])
# stdDev = np.std(series)
# mean = np.mean(series)
# tail_average = tail_avg(timeseries)
# z_score = (tail_average - mean) / stdDev
# len_series = len(series)
# threshold = scipy.stats.t.isf(.05 / (2 * len_series), len_series - 2)
# threshold_squared = threshold * threshold
# grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(
# threshold_squared / (len_series - 2 + threshold_squared))
# return z_score > grubbs_score
def first_hour_average(timeseries):
"""
  Calculate the simple average over one hour, one day ago.
  A timeseries is anomalous if the average of the last three datapoints
  is outside of three standard deviations of this value.
"""
day = timedelta(days=1)
hour = timedelta(hours=1)
last_hour_threshold = timeseries[-1][0] - (day - hour)
startTime = last_hour_threshold - hour
series = pandas.Series([x[1] for x in timeseries
if x[0] >= startTime
and x[0] < last_hour_threshold])
mean = (series).mean()
stdDev = (series).std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
def stddev_from_average(timeseries):
"""
A timeseries is anomalous if the absolute value of the average of the latest
  three datapoints minus the series mean is greater than three standard
  deviations of the series. This does not exponentially weight the average and so
is better for detecting anomalies with respect to the entire series.
"""
series = pandas.Series([x[1] for x in timeseries])
mean = series.mean()
stdDev = series.std()
t = tail_avg(timeseries)
return abs(t - mean) > 3 * stdDev
def stddev_from_moving_average(timeseries):
"""
A timeseries is anomalous if the absolute value of the average of the latest
  three datapoints minus the moving average is greater than three standard
deviations of the moving average. This is better for finding anomalies with
respect to the short term trends.
"""
series = pandas.Series([x[1] for x in timeseries])
expAverage = pandas.stats.moments.ewma(series, com=50)
stdDev = pandas.stats.moments.ewmstd(series, com=50)
return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)
def mean_subtraction_cumulation(timeseries):
"""
A timeseries is anomalous if the value of the next datapoint in the
series is farther than three standard deviations out in cumulative terms
after subtracting the mean from each data point.
"""
series = pandas.Series([x[1] if x[1] else 0 for x in timeseries])
series = series - series[0:len(series) - 1].mean()
stdDev = series[0:len(series) - 1].std()
expAverage = pandas.stats.moments.ewma(series, com=15)
return abs(series.iget(-1)) > 3 * stdDev
def least_squares(timeseries):
"""
A timeseries is anomalous if the average of the last three datapoints
on a projected least squares model is greater than three sigma.
"""
x = np.array(
[(t[0] - datetime(1970,1,1)).total_seconds() for t in timeseries])
y = np.array([t[1] for t in timeseries])
A = np.vstack([x, np.ones(len(x))]).T
results = np.linalg.lstsq(A, y)
residual = results[1]
m, c = np.linalg.lstsq(A, y)[0]
errors = []
for i, value in enumerate(y):
projected = m * x[i] + c
error = value - projected
errors.append(error)
if len(errors) < 3:
return False
std_dev = np.std(errors)
t = (errors[-1] + errors[-2] + errors[-3]) / 3
return abs(t) > std_dev * 3 and round(std_dev) != 0 and round(t) != 0
def histogram_bins(timeseries):
"""
A timeseries is anomalous if the average of the last three datapoints falls
into a histogram bin with less than 20 other datapoints (you'll need to tweak
that number depending on your data)
  Returns: True if the tail_avg falls into a bin with 20 or fewer datapoints,
  i.e. a sparsely populated (anomalous) region of the histogram; False otherwise.
"""
series = np.array([x[1] for x in timeseries])
t = tail_avg(timeseries)
h = np.histogram(series, bins=15)
bins = h[1]
for index, bin_size in enumerate(h[0]):
if bin_size <= 20:
# Is it in the first bin?
if index == 0:
if t <= bins[0]:
return True
# Is it in the current bin?
elif t >= bins[index] and t < bins[index + 1]:
return True
return False
# def ks_test(timeseries):
# """
# A timeseries is anomalous if 2 sample Kolmogorov-Smirnov test indicates
# that data distribution for last 10 minutes is different from last hour.
# It produces false positives on non-stationary series so Augmented
# Dickey-Fuller test applied to check for stationarity.
# """
# hour_ago = time() - 3600
# ten_minutes_ago = time() - 600
# reference = scipy.array(
# [x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])
# probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])
# if reference.size < 20 or probe.size < 20:
# return False
# ks_d, ks_p_value = scipy.stats.ks_2samp(reference, probe)
# if ks_p_value < 0.05 and ks_d > 0.5:
# adf = sm.tsa.stattools.adfuller(reference, 10)
# if adf[1] < 0.05:
# return True
# return False
# def is_anomalously_anomalous(metric_name, ensemble, datapoint):
# """
# This method runs a meta-analysis on the metric to determine whether the
# metric has a past history of triggering.
# TODO: weight intervals based on datapoint
# """
# # We want the datapoint to avoid triggering twice on the same data
# new_trigger = [time(), datapoint]
# # Get the old history
# raw_trigger_history = redis_conn.get("trigger_history." + metric_name)
# if not raw_trigger_history:
# redis_conn.set("trigger_history." + metric_name, packb(
# [(time(), datapoint)]))
# return True
# trigger_history = unpackb(raw_trigger_history)
# # Are we (probably) triggering on the same data?
# if (new_trigger[1] == trigger_history[-1][1] and
# new_trigger[0] - trigger_history[-1][0] <= 300):
# return False
# # Update the history
# trigger_history.append(new_trigger)
# redis_conn.set("trigger_history." + metric_name, packb(trigger_history))
# # Should we surface the anomaly?
# trigger_times = [x[0] for x in trigger_history]
# intervals = [
# trigger_times[i + 1] - trigger_times[i]
# for i, v in enumerate(trigger_times)
# if (i + 1) < len(trigger_times)
# ]
# series = pandas.Series(intervals)
# mean = series.mean()
# stdDev = series.std()
# return abs(intervals[-1] - mean) > 3 * stdDev
# def run_selected_algorithm(timeseries, metric_name):
# """
# Filter timeseries and run selected algorithm.
# """
# # Get rid of short series
# if len(timeseries) < MIN_TOLERABLE_LENGTH:
# raise TooShort()
# # Get rid of stale series
# if time() - timeseries[-1][0] > STALE_PERIOD:
# raise Stale()
# # Get rid of boring series
# if len(
# set(
# item[1] for item in timeseries[
# -MAX_TOLERABLE_BOREDOM:])) == BOREDOM_SET_SIZE:
# raise Boring()
# try:
# ensemble = [globals()[algorithm](timeseries) for algorithm in ALGORITHMS]
# threshold = len(ensemble) - CONSENSUS
# if ensemble.count(False) <= threshold:
# if ENABLE_SECOND_ORDER:
# if is_anomalously_anomalous(metric_name, ensemble, timeseries[-1][1]):
# return True, ensemble, timeseries[-1][1]
# else:
# return True, ensemble, timeseries[-1][1]
# return False, ensemble, timeseries[-1][1]
# except:
# logging.error("Algorithm error: " + traceback.format_exc())
# return False, [], 1 | agpl-3.0 |
ekostat/ekostat_calculator | core/data_handlers.py | 1 | 96099 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 08:10:06 2017
@author: a002028
"""
import os
import sys
current_path = os.path.dirname(os.path.realpath(__file__))[:-4]
if current_path not in sys.path:
sys.path.append(current_path)
import pandas as pd
import numpy as np
import time
import pickle
import utils
import core
import core.exceptions as exceptions
"""
#==============================================================================
#==============================================================================
"""
class ColumnDataHandler(object):
"""
"""
def __init__(self):
super().__init__()
"""
#==============================================================================
#==============================================================================
"""
class RowDataHandler(object):
"""
"""
def __init__(self):
super().__init__()
#==========================================================================
def _get_index_fields(self, data_keys=[], extra_field=[]):
"""
fp: filter_parameters
"""
exclude_params = list(self.filter_parameters.fields_index) + \
[self.filter_parameters.value_key] + \
[self.filter_parameters.qflag_key]
return list(p for p in self.filter_parameters.compulsory_fields \
if p not in exclude_params and p in data_keys) + \
self.filter_parameters.fields_index + extra_field
#==========================================================================
def _merge_df_string_columns(self, col_to_merge, new_key=u'new_key', sep=u'__'):
"""
"""
self.df[new_key] = self.df.get(col_to_merge[0]).astype(str).str.cat([ \
self.df.get(key).astype(str) for key in col_to_merge[1:] if key in self.df], sep=sep)
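        # Illustrative result of the concatenation above (assumed values): with sep='__',
        # a row with VALUE = '3.2' and QFLAG = 'B' gets new_key = '3.2__B'.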
#==========================================================================
def _one_parameter_df_adjustments(self):
"""
fp: filter_parameters
"""
map_dict = {self.filter_parameters.value_key: self.filter_parameters.use_parameters,
self.filter_parameters.qflag_key: 'Q_'+self.filter_parameters.use_parameters}
# Deleting column that only contains parameter name
self._delete_columns_from_df(columns=self.filter_parameters.parameter_key)
# Changing column "VALUE" to parameter name and column "QFLAG" to Q_"parameter_name"
self._rename_columns_of_DataFrame(map_dict)
#==========================================================================
def _seperate_para_value_from_qflag(self, sep=''):
"""
"""
# Simply get the length of one seperated string
for para in self.para_list:
if np.any(self.df[para]):
length = len(self.df[para][self.df.index[self.df[para].notnull()][0]].split(sep))
break
if not 'length' in locals():
raise UserWarning('No data in file?')
for para in self.para_list:
self.df[para] = self.df[para].apply(lambda x: x.split(sep) if x else ['']*length)
self.df[[para,'Q_'+para]] = pd.DataFrame(self.df.get(para).values.tolist())
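        # Illustrative result of the split above (assumed values): with sep='__', a cell
        # 'NTRA' = '3.2__B' becomes NTRA = '3.2' and Q_NTRA = 'B'; empty cells yield
        # empty strings for both columns.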
#==========================================================================
def _set_column_table_from_pivot_table(self, sort=True):
"""
fp: filter_parameters
"""
df_col = self.df.unstack() # necessary to create a new local dataframe here
df_col = df_col.reset_index()
self.df = df_col
if sort:
self.sort_dict_by_keys(sort_order=self.filter_parameters.sort_by_fields,
ascending_list=[True]*len(self.filter_parameters.sort_by_fields),
depth_head=self.filter_parameters.depth_key,
serno_head=self.filter_parameters.visit_id_key)
#==========================================================================
def _set_pivot_table(self, values, index):
"""
"""
self.df = pd.pivot_table(self.df, values=values, index=index, aggfunc='first')
#==========================================================================
def filter_row_data(self, data_filter_object=None, map_object=None):
"""
        Filters row data using
        _one_parameter_df_adjustments() when self.one_parameter = True
_merge_df_string_columns() when self.one_parameter = False
"""
if self.one_parameter:
self._one_parameter_df_adjustments()
else:
self._merge_df_string_columns([self.filter_parameters.value_key, self.filter_parameters.qflag_key],
new_key=u'TEMP_VALUE',
sep='__')
index_fields = self._get_index_fields(data_keys=self.df.keys())
print(len(index_fields), index_fields)
self._set_pivot_table(u'TEMP_VALUE', index_fields)
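        # At this point each remaining row holds the combined 'VALUE__QFLAG' string
        # (TEMP_VALUE) pivoted on the index fields; get_column_data_format() later unstacks
        # the pivot and splits the string back into value and Q_ columns.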
#==========================================================================
def get_column_data_format(self):
"""
"""
if self.one_parameter:
pass
else:
self._set_column_table_from_pivot_table(sort=True)
self._seperate_para_value_from_qflag(sep='__')
# self.add_df(df_col, 'col', add_columns=False)
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
#class DataFrameHandler(object):
class DataFrameHandler(ColumnDataHandler, RowDataHandler):
"""
Holds functions to handle DataFrame operations
"""
def __init__(self):
super().__init__()
self.wb_id_header = 'MS_CD'
#==========================================================================
def _add_columns(self):
"""
updated 20190123 by Lena Viktorsson
added sample_id column
"""
print('in _add_columns')
# self.df['time'] = pd.Series(pd.to_datetime(self.df['SDATE'] + self.df['STIME'], format='%Y-%m-%d%H:%M'))
# df['latit_dec_deg'] = df['LATIT'].apply(utils.decmin_to_decdeg)
# df['longi_dec_deg'] = df['LONGI'].apply(utils.decmin_to_decdeg)
if not 'LATIT_DD' in self.df and 'LATIT_DM' in self.df:
self.df['LATIT_DD'] = self.df['LATIT_DM'].apply(utils.decmin_to_decdeg)
if not 'LONGI_DD' in self.df and 'LONGI_DM' in self.df:
self.df['LONGI_DD'] = self.df['LONGI_DM'].apply(utils.decmin_to_decdeg)
if 'LATIT_DD' in self.df and 'LONGI_DD' in self.df:
self.df['profile_key'] = self.df['SDATE'].apply(str) + \
' ' + \
self.df['STIME'].apply(str) + \
' ' + \
self.df['LATIT_DD'].apply(str) + \
' ' + \
self.df['LONGI_DD'].apply(str)
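            # Illustrative profile_key (assumed values): '2006-08-15 10:30 58.3542 11.2261',
            # i.e. a single string identifying one sampling visit by date, time and position.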
if 'SHARKID_MD5' in self.df.columns:
# use sharkid_md for sample id for biological datatypes
self.df['SAMPLE_ID'] = self.df.SHARKID_MD5
elif ('SERNO' in self.df.columns) and ('SHIPC' in self.df.columns):
# use year_seriesno_shipcod for sample_id for phys/chem datatype
self.df['SAMPLE_ID'] = self.df['MYEAR'].apply(str) + \
'_' + \
self.df['SERNO'] + \
'_' + \
self.df['SHIPC']
else:
self.df['SAMPLE_ID'] = self.df['SDATE'] + '_SCM'
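        # Illustrative SAMPLE_ID values (assumed data): a phys/chem sample with MYEAR=2006,
        # SERNO='0123' and SHIPC='77SE' gets '2006_0123_77SE'; biological data reuse
        # SHARKID_MD5 directly.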
#==========================================================================
def _add_field(self):
if self.filter_parameters.add_parameters:
self.df[self.filter_parameters.add_parameters] = ''
#==========================================================================
def _additional_filter(self):
""" Can be overwritten from child """
pass
#==========================================================================
def _apply_field_filter(self):
"""
Selects columns from dataframe
Adds a columns for the origin of the dataframe (filepath)
Organize the data format
"""
self._select_columns_from_df() # use only default fields
self._add_origin_columns(dtype=self.dtype, file_path=self.source) # MW
self._rename_param('No species in sample', self.filter_parameters.use_parameters)
self._organize_data_format()
#==========================================================================
def _calculate_data(self):
""" Can be overwritten from child """
self._add_waterbody_area_info()
#==========================================================================
def _add_origin_columns(self, dtype='', file_path=''):
"""
Created 20180419 by Magnus Wenzer
Updated 20180419 by Magnus Wenzer
        Adds columns for origin_dtype and origin_file_path
"""
self.df['origin_dtype'] = dtype
self.df['origin_file_path'] = os.path.basename(file_path)
#==========================================================================
def _add_waterbody_area_info(self):
print('in _add_waterbody_area_info')
#TODO:
# add if VISS_EU_CD not in df.columns add them from vfk-kod kolumn
wb_id_list = self.column_data[self.source][self.wb_id_header].tolist()
# wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id_list[0])
# TODO: remove this when fixed problem with WA-code for Inre Idefjordens
if '' in wb_id_list:
# utanför svensk EEZ
pass
if 'WA28238367' in wb_id_list:
# norska delen av Inre Idefjorden
pass
# self.column_data[self.source].loc[
# self.column_data[self.source][self.wb_id_header] == 'WA28238367', self.wb_id_header] = 'WA24081564'
# wb_id_list = self.column_data[self.source][self.wb_id_header].tolist()
if 'WA36808071' in wb_id_list:
# Idefjorden, norska delen?
pass
# self.column_data[self.source].loc[
# self.column_data[self.source][self.wb_id_header] == 'WA36808071', self.wb_id_header] = 'WA18466637'
# wb_id_list = self.column_data[self.source][self.wb_id_header].tolist()
if 'WATER_DISTRICT_CODE' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id)
new_list.append(wd_id)
self.column_data[self.source]['WATER_DISTRICT_CODE'] = new_list
if 'WATER_DISTRICT_NAME' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
wd_name = self.mapping_objects['water_body'].get_waterdistrictname_for_water_body(wb_id)
new_list.append(wd_name)
self.column_data[self.source]['WATER_DISTRICT_NAME'] = new_list
if 'WATER_TYPE_AREA' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
type_name = self.mapping_objects['water_body'].get_type_area_name_for_water_body(wb_id)
new_list.append(type_name)
self.column_data[self.source]['WATER_TYPE_AREA'] = new_list
if 'WATER_BODY_NAME' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
wb_name = self.mapping_objects['water_body'].get_name_for_water_body(wb_id)
new_list.append(wb_name)
self.column_data[self.source]['WATER_BODY_NAME'] = new_list
if 'MS_CD' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
ms_cd_code = self.mapping_objects['water_body'].get_mscd_for_water_body(wb_id)
new_list.append(ms_cd_code)
self.column_data[self.source]['MS_CD'] = new_list
if 'VISS_EU_CD' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
eu_cd_code = self.mapping_objects['water_body'].get_visseucd_for_water_body(wb_id)
new_list.append(eu_cd_code)
self.column_data[self.source]['VISS_EU_CD'] = new_list
#==========================================================================
def _check_nr_of_parameters(self):
"""
If one_parameter: We only need to set filter to keep parameter. No need
to use pivot_table..
"""
if type(self.filter_parameters.use_parameters) != list:
self.one_parameter=True
else:
self.one_parameter=False
#==========================================================================
def _convert_format(self, key_list, as_type=np.unicode):
"""
"""
for key in key_list:
if key and key in self.df:
try:
self.df[key] = self.df[key].astype(as_type)
except:
print(u'Could not convert format for:', key, u'in DataFrame')
#==========================================================================
def _delete_columns_from_df(self, columns=[]):
"""
"""
self.df = self.df.drop(columns, axis=1, errors='ignore') # inplace=True ?
#==========================================================================
def _drop_duplicates(self, based_on_column=''):
self.df.drop_duplicates(subset=based_on_column, inplace=True)
#==========================================================================
def _filter_column_data(self, df, data_filter_object):
"""
Filters column file data and returns resulting dataframe
"""
boolean = data_filter_object.get_boolean(df)
if not len(boolean):
return df
return df.loc[df.index[boolean], :]
#==========================================================================
def _handle_column_data(self):
"""
"""
# cdh = ColumnDataHandler(DataFrameHandler)
self.sort_columns_of_df()
self.add_column_df()
self._calculate_data()
#==========================================================================
def _handle_row_data(self, append_row_data=True):
"""
Handles row data
Selects parameters
"""
self._select_parameters()
if append_row_data:
self.add_row_df()
if self.raw_data_copy:
self.save_data_as_txt(directory=self.export_directory,
prefix=u'Raw_format')
# rdh = RowDataHandler(DataFrameHandler)
self._additional_filter()
self.filter_row_data()
self.get_column_data_format()
# print(self.df.get('BQIm'))
self.sort_columns_of_df()
self.add_column_df()
# self.add_row_df()
self._calculate_data()
#==========================================================================
def _include_empty_cells(self, data=dict):
# if data is dataframe.. but not working properly
# mask = np.column_stack([data[col].str.contains('"empty"', na=False) for col in data])
# data.loc[mask.any(axis=1)] = ''
#TODO Make it nicer :D
for key in data.keys():
for i, value in enumerate(data.get(key)):
if value == '"empty"':
data[key][i] = ''
return data
#==========================================================================
def _map_parameter_list(self):
"""
"""
# TODO: for rowdata this row results in None type calling unique()
p_map = self.parameter_mapping.get_parameter_mapping(self.df.get(self.filter_parameters.parameter_key).unique())
p_list = list(p for p in p_map if p_map[p] in self.filter_parameters.use_parameters)
return p_map, p_list
#==========================================================================
def _organize_data_format(self):
"""
organize the data based on raw data format, either row or column data
"""
if self.raw_data_format == 'row':
self._handle_row_data()
elif self.raw_data_format == 'column':
self._handle_column_data()
#==========================================================================
def _recognize_format(self):
"""
        Recognizes row or column format based on whether a parameter_key is specified in the filter file for the datatype.
        Then sets the raw_data_format attribute to 'row' or 'column'.
"""
# TODO why is parameter_key attribute a list for rowdata?
# print(self.filter_parameters.parameter_key)
# print(self.df.keys())
if self.filter_parameters.parameter_key in self.df: #'PARAM' in data header
self.raw_data_format = 'row'
else:
self.raw_data_format = 'column'
#TODO elif recognize netcdf..
#==========================================================================
def _remap_header(self):
"""
remaps header in file according to parameter_mapping file
:return:
"""
# for k in self.df.columns.values:
# print(k)
map_dict = self.parameter_mapping.get_parameter_mapping(self.df.columns.values)
self._rename_columns_of_DataFrame(map_dict)
#==========================================================================
def _rename_columns_of_DataFrame(self, mapping_dict):
"""
"""
self.df = self.df.rename(index=str, columns=mapping_dict)
# ==========================================================================
def _rename_param(self, original_name, new_name):
"""
overwritten in dataframehandler for zoobenthos
"""
pass
#==========================================================================
def _select_columns_from_df(self):
"""
Keeps only the columns specified in compulsory fields in the datatypes filter file
"""
if self.raw_data_format == 'row':
self._delete_columns_from_df(columns=list(x for x in \
self.df.keys() if x not in self.filter_parameters.compulsory_fields))
elif self.raw_data_format == 'column':
self._delete_columns_from_df(columns=list(x for x in \
self.df.keys() if x not in \
self.filter_parameters.compulsory_fields + \
self.filter_parameters.use_parameters + \
[u'Q_'+p for p in self.filter_parameters.use_parameters]))
#==========================================================================
def _select_parameters(self):
"""
Can be rewritten in child-class, eg. DataHandlerPhytoplankton
First checks number of parameters that should be used and stores as a boolean attribute in self.one_parameter
for later formatting to columns format in ...
"""
self._check_nr_of_parameters()
p_map, p_list = self._map_parameter_list()
self.para_list = self.parameter_mapping.map_parameter_list(p_list)
for para in p_list:
# Change parameter name according to parameter codelist
self.df[self.filter_parameters.parameter_key] = np.where(self.df[self.filter_parameters.parameter_key]==para,
p_map[para],
self.df[self.filter_parameters.parameter_key])
# indices = np.where( self.df[parameter_head] == params_to_use[:,None] )[0]
# indices = np.where( self.df[self.filter_parameters.parameter_key].isin(self.para_list) )[0]
# self.df = self.df.iloc[indices,:]
boolean = self.df[self.filter_parameters.parameter_key].isin(self.para_list)
self.df = self.df.loc[boolean,:]
#==========================================================================
def add_column_df(self, add_columns=True):
"""
Adds data to the internal data structure.
"""
# Add columns (time etc.)
# Should always be true?
if add_columns:
self._add_columns()
self.column_data[self.source] = self.df.copy(deep=True) # One DataFrame per source
# self.column_data = self.column_data.append(self.df, ignore_index=True).fillna('')
#==========================================================================
def add_row_df(self, add_columns=False):
"""
Adds data to the internal data structure.
"""
# Add columns (time etc.)
# Should always be false?
if add_columns:
self._add_columns()
self.row_data[self.source] = self.df.copy(deep=True)
# self.row_data = self.row_data.append(self.df, ignore_index=True).fillna('')
#==========================================================================
def filter_data(self, data_filter_object, filter_id=''):
"""
Filters data according to data_filter_object.
data_filter_object is a core.filters.DataFilter-object.
Returns a DataHandler object with the filtered data.
"""
new_data_handler = DataHandler(self.source + '_filtered_%s' % filter_id)
if len(self.column_data):
# print( 'data_filter_object', data_filter_object)
df = self._filter_column_data(self.column_data, data_filter_object)
if data_filter_object.parameter:
# print('df', df.columns)
# print('data_filter_object.parameter:', data_filter_object.parameter)
for col in list(df.columns):
if col not in core.ParameterList().metadata_list + [data_filter_object.parameter]:
df = df.drop(col, 1)
new_data_handler.add_df(df, 'column')
if len(self.row_data):
df = self._filter_row_data(self.row_data, data_filter_object)
new_data_handler.add_df(df, 'row')
return new_data_handler
#==========================================================================
def get_dict(self, data, drop_nans=True, drop_empty=True):
"""
"""
if drop_nans:
# Index does not matter for the returned dictionary
return { key : list(data.get(key).dropna(axis=0)) for key in data}
else:
return { key : list(data.get(key)) for key in data}
#==========================================================================
def get_index_for_profile_key(self, profile_key):
"""
Method to get index for a unique profile key.
profile_key is "time LATIT LONGI"
"""
return self.column_data.index[self.column_data['profile_key'] == profile_key]
#==========================================================================
def get_profile_key_list(self, year=None):
"""
Returns a list och unique combinations of pos and time.
"""
if year:
return sorted(set(self.column_data.loc[self.column_data['MYEAR'] == year, 'profile_key']))
else:
return sorted(set(self.column_data['profile_key']))
#==========================================================================
def load_source(self, file_path=u'', sep='\t', encoding='cp1252', raw_data_copy=False):
"""
Created by Johannes
Updated 20180419 by Magnus Wenzer
Can be rewritten in child-class, eg. DataHandlerPhytoplankton
"""
self.source = file_path
self.raw_data_copy = raw_data_copy
self.df = core.Load().load_txt(file_path, sep=sep, encoding=encoding, fill_nan=u'')
self._remap_header()
self._recognize_format()
self._apply_field_filter()
#==========================================================================
def delete_source(self, file_path):
"""
Created 20180422 by Magnus Wenzer
Updated 20180422 by Magnus Wenzer
        Deletes a source in the data handler.
"""
if file_path in self.column_data.keys():
self.column_data.pop(file_path)
#==========================================================================
def read_filter_file(self, file_path=u'', get_as_dict=True):
"""
"""
data = core.Load().load_txt(file_path, fill_nan=np.nan)
if get_as_dict:
data = self.get_dict(data)
data = self._include_empty_cells(data=data)
# print(data)
self.filter_parameters = core.AttributeDict()
self.filter_parameters._add_arrays_to_entries(**data)
#==========================================================================
def save_data_as_txt(self, directory=u'', prefix=u''):
"""
"""
if not directory:
return False
# directory = os.path.dirname(os.path.realpath(__file__))[:-4] + 'test_data\\test_exports\\'
if not directory.endswith(('/','\\')):
directory = directory + '/'
file_path = directory + '_'.join([prefix, self.dtype, 'data.txt'])
print(u'Saving data to:',file_path)
dir_name = os.path.dirname(file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
self.df.to_csv(file_path, sep='\t', encoding='cp1252', index=False)
#==========================================================================
def save_column_data(self, file_path):
"""
Created: 20180422 by Magnus Wenzer
Last modified: 20180422 by Magnus Wenzer
"""
pickle.dump(self.column_data, open(file_path, "wb"))
#==========================================================================
def sort_dict_by_keys(self,
sort_order=[],
ascending_list=[],
depth_head=None,
serno_head=None,
drop_index=True):
"""
sort_order: key list in sorting order
['key_1','key_2','key_3']
ascending_list: ascending sorting or not (key specific)
[True,False,True]
return_as_dataframe: return as pandas Dataframe
"""
print(u'Sorting..')
if any([depth_head, serno_head]):
self._convert_format([depth_head, serno_head], as_type=np.float)
self.df = self.df.sort_values(sort_order, ascending=ascending_list)
if any([depth_head, serno_head]):
self._convert_format([depth_head, serno_head], as_type=np.unicode)
if drop_index:
print(u'Resetting and Dropping INDEX')
self.df = self.df.reset_index().drop([u'index'], axis=1)
#==========================================================================
def sort_columns_of_df(self):
sort_order = [key for key in self.filter_parameters.compulsory_fields if key in self.df]
if utils.is_sequence(self.filter_parameters.use_parameters):
for para in self.filter_parameters.use_parameters:
if para in self.df:
sort_order.append(para)
if 'Q_'+para in self.df:
sort_order.append('Q_'+para)
else:
if self.filter_parameters.use_parameters in self.df:
sort_order.append(self.filter_parameters.use_parameters)
if 'Q_'+self.filter_parameters.use_parameters in self.df:
sort_order.append('Q_'+self.filter_parameters.use_parameters)
sort_order.extend(['origin_dtype', 'origin_file_path'])
self.df = self.df[sort_order]
# self.df = self.df.ix[:, sort_order]
# self.df.reindex_axis(sort_order, axis=1) # DOES NOT WORK PROPERLY
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class NETCDFDataHandler(DataFrameHandler):
"""
"""
def __init__(self,
export_directory=''):
super().__init__()
self.export_directory = export_directory
#==========================================================================
def load_nc(self):
pass
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerPhysicalChemical(DataFrameHandler):
"""
"""
def __init__(self,
filter_path=u'',
export_directory='',
parameter_mapping=None,
no_qflags=False,
mapping_objects = None): # no_qflags for data that has no quality flags (model data..)
super().__init__()
self.dtype = 'physicalchemical'
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.no_qflags = no_qflags
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
# ==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
#self.check_waterbody_id()
self._add_waterbody_area_info()
# print('_calculate_data')
if self.no_qflags:
self.calculate_din()
else:
self.calculate_din(ignore_qf_list=['B','S'])
# ==========================================================================
def check_waterbody_id(self):
"""
Checks for columns without waterbody id and tries to find waterbody id from waterbodyname
:return: nothing
"""
#TODO: this was started when trying to use data from outside the Swedish EEZ
# that does not have a wb_id but a wb_name. Problem the wb_name has more then one wb_id
wb_name_list = self.df.loc[self.df[self.wb_id_header] == '', "WATER_BODY_NAME"].unique()
for wb_name in wb_name_list:
temp_df = self.mapping_objects['water_body'].get('water_bodies')
wb_id = temp_df.loc[(temp_df["WATERBODY_NAME"] == wb_name) & (temp_df["WB"] == 'Y'), self.wb_id_header]
wb_id_df = temp_df.loc[(temp_df["WATERBODY_NAME"] == wb_name) & (temp_df["WB"] == 'Y')]
if len(wb_id) == 1:
wb_id = wb_id.values[0]
else:
wb_id = wb_id.values[0]
#raise Exception('more than one wb id exists for the waterbody name {}'.format(wb_name))
self.df.loc[(self.df[self.wb_id_header] == '') & (self.df['WATER_BODY_NAME'] == wb_name),
self.wb_id_header] = wb_id
#==========================================================================
def calculate_din(self, ignore_qf_list=[]):
"""
        Returns a vector of calculated DIN.
        If neither NOx (NTRZ) nor NO3 (NTRA) is present, the value is np.nan.
        TODO: take qflags into consideration?
"""
din_list = []
for no2, no3, nox, nh4 in zip(*self.get_nxx_lists(ignore_qf_list)):
if np.isnan(nox):
din = np.nan
if not np.isnan(no3):
din = no3
if not np.isnan(no2):
din += no2
if not np.isnan(nh4):
din += nh4
else:
din = nox
if not np.isnan(nh4):
din += nh4
if np.isnan(din):
din=''
else:
din = str(round(din, 2))
din_list.append(din)
if 'DIN' not in self.column_data[self.source]:
self.column_data[self.source]['DIN'] = din_list
else:
self.column_data[self.source]['DIN_calculated'] = din_list
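# Worked example of the DIN logic above (hypothetical concentrations, for illustration only):
#   NOx (NTRZ) = NaN, NO3 (NTRA) = 1.2, NO2 (NTRI) = 0.1, NH4 (AMON) = 0.5
#       -> DIN = 1.2 + 0.1 + 0.5 = 1.8, appended as the string '1.8'
#   NOx (NTRZ) = 1.9, NH4 (AMON) = 0.5
#       -> DIN = 1.9 + 0.5 = 2.4, appended as the string '2.4'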
#==========================================================================
def get_float_list(self, key, ignore_qf=[]):
"""
Get all values as floats
"""
return utils.get_float_list_from_str(df=self.column_data[self.source],
key=key, ignore_qf=ignore_qf)
#==========================================================================
def get_nxx_lists(self, ignore_qf_list):
"""
Returns 4 equal-length lists for NO2 (NTRI), NO3 (NTRA), NO23 (NTRZ) and NH4 (AMON).
If a parameter does not exist in the loaded dataset, a list filled
with NaNs is returned for that specific parameter.
"""
if 'NTRI' in self.column_data[self.source]:
ntri = self.get_float_list(key='NTRI', ignore_qf=ignore_qf_list)
else:
ntri = [np.nan]*self.column_data[self.source].shape[0]
if 'NTRA' in self.column_data[self.source]:
ntra = self.get_float_list(key='NTRA', ignore_qf=ignore_qf_list)
else:
ntra = [np.nan]*self.column_data[self.source].shape[0]
if 'NTRZ' in self.column_data[self.source]:
ntrz = self.get_float_list(key='NTRZ', ignore_qf=ignore_qf_list)
else:
ntrz = [np.nan]*self.column_data[self.source].shape[0]
if 'AMON' in self.column_data[self.source]:
amon = self.get_float_list(key='AMON', ignore_qf=ignore_qf_list)
else:
amon = [np.nan]*self.column_data[self.source].shape[0]
return ntri, ntra, ntrz, amon
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerPhysicalChemicalSatellite(DataHandlerPhysicalChemical):
def __init__(self,
filter_path=u'',
export_directory='',
parameter_mapping=None,
no_qflags=False,
mapping_objects = None): # no_qflags for data that has no quality flags (model data..)
super().__init__(filter_path=filter_path,
export_directory=export_directory,
parameter_mapping=parameter_mapping,
no_qflags=no_qflags,
mapping_objects = mapping_objects)
self.dtype = 'physicalchemicalsatellite'
#==========================================================================
def _calculate_data(self):
self._set_position()
self._add_waterbody_area_info()
def _set_position(self):
"""
Sets the position of the waterbody based on the VISS_EU_CD code. This information is not available in MS_CD, so VISS_EU_CD (not self.wb_id_header) is used.
:return:
"""
# x=self.column_data[self.source]['VISS_EU_CD'][0]
# print(x[2:4],x[4:6],x[6:8],x[9:11],x[11:13],x[13:15])
# print(int(x[2:4])+int(x[4:6])/60+int(x[6:8])/3600)
# print(int(x[9:11])+int(x[11:13])/60+int(x[13:15])/3600)
# lat=int(x[2:4])+float(x[4:6])/60+float(x[6:8])/60
self.column_data[self.source]['LATIT_DD'] = self.column_data[self.source]['VISS_EU_CD'].apply(lambda x: int(x[2:4])+int(x[4:6])/60+int(x[6:8])/3600 if 'C' not in x else np.nan)
self.column_data[self.source]['LONGI_DD'] = self.column_data[self.source]['VISS_EU_CD'].apply(lambda x: int(x[9:11])+int(x[11:13])/60+int(x[13:15])/3600 if 'C' not in x else np.nan)
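# Worked example of the coordinate conversion above (hypothetical code 'SE584340-174500'):
#   LATIT_DD = 58 + 43/60 + 40/3600 = 58.7278 (approx.)
#   LONGI_DD = 17 + 45/60 + 00/3600 = 17.7500
# Codes containing 'C' are mapped to NaN, as in the lambdas above.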
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerPhysicalChemicalModel(DataFrameHandler):
"""
"""
def __init__(self,
filter_path=u'',
export_directory='',
parameter_mapping=None,
no_qflags=False,
mapping_objects = None): # no_qflags for data that has no quality flags (model data..)
super().__init__()
self.dtype = 'physicalchemicalmodel'
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.no_qflags = no_qflags
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
# ==========================================================================
def _add_serno(self):
"""
adds date as serno
:return:
"""
self.column_data[self.source]['SERNO'] = self.column_data[self.source]['SDATE'].copy()
# ==========================================================================
def _add_shipc(self):
"""
adds SCM as shipcode
:return:
"""
self.column_data[self.source]['SHIPC'] = 'SCM'
#==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
# print('_calculate_data')
if self.no_qflags:
self.calculate_din()
else:
self.calculate_din(ignore_qf_list=['B','S'])
self._add_waterbody_area_info()
self._set_position()
#==========================================================================
def _set_position(self):
"""
Sets the position of the waterbody based on the VISS_EU_CD code. This information is not available in MS_CD, so VISS_EU_CD (not self.wb_id_header) is used.
:return:
"""
# x=self.column_data[self.source]['VISS_EU_CD'][0]
# print(x[2:4],x[4:6],x[6:8],x[9:11],x[11:13],x[13:15])
# print(int(x[2:4])+int(x[4:6])/60+int(x[6:8])/3600)
# print(int(x[9:11])+int(x[11:13])/60+int(x[13:15])/3600)
# lat=int(x[2:4])+float(x[4:6])/60+float(x[6:8])/60
self.column_data[self.source]['LATIT_DD'] = self.column_data[self.source]['VISS_EU_CD'].apply(lambda x: int(x[2:4])+int(x[4:6])/60+int(x[6:8])/3600 if 'C' not in x else np.nan)
self.column_data[self.source]['LONGI_DD'] = self.column_data[self.source]['VISS_EU_CD'].apply(lambda x: int(x[9:11])+int(x[11:13])/60+int(x[13:15])/3600 if 'C' not in x else np.nan)
#==========================================================================
def calculate_din(self, ignore_qf_list=[]):
"""
Calculates DIN (dissolved inorganic nitrogen) for each row and stores it as a column.
If NOx (NTRZ) is present, DIN = NOx + NH4; otherwise DIN is built from NO3
(+ NO2 and NH4 when present). If both NOx and NO3 are missing the value is left empty.
"""
din_list = []
for no2, no3, nox, nh4 in zip(*self.get_nxx_lists(ignore_qf_list)):
if np.isnan(nox):
din = np.nan
if not np.isnan(no3):
din = no3
if not np.isnan(no2):
din += no2
if not np.isnan(nh4):
din += nh4
else:
din = nox
if not np.isnan(nh4):
din += nh4
if np.isnan(din):
din=''
else:
din = str(round(din, 2))
din_list.append(din)
if 'DIN' not in self.column_data[self.source]:
self.column_data[self.source]['DIN'] = din_list
else:
self.column_data[self.source]['DIN_calculated'] = din_list
#==========================================================================
def get_float_list(self, key, ignore_qf=[]):
"""
Get all values as floats
"""
return utils.get_float_list_from_str(df=self.column_data[self.source],
key=key, ignore_qf=ignore_qf)
#==========================================================================
def get_nxx_lists(self, ignore_qf_list):
"""
Returns 4 equal-length lists for NO2 (NTRI), NO3 (NTRA), NO23 (NTRZ) and NH4 (AMON).
If a parameter does not exist in the loaded dataset, a list filled
with NaNs is returned for that specific parameter.
"""
if 'NTRI' in self.column_data[self.source]:
ntri = self.get_float_list(key='NTRI', ignore_qf=ignore_qf_list)
else:
ntri = [np.nan]*self.column_data[self.source].shape[0]
if 'NTRA' in self.column_data[self.source]:
ntra = self.get_float_list(key='NTRA', ignore_qf=ignore_qf_list)
else:
ntra = [np.nan]*self.column_data[self.source].shape[0]
if 'NTRZ' in self.column_data[self.source]:
ntrz = self.get_float_list(key='NTRZ', ignore_qf=ignore_qf_list)
else:
ntrz = [np.nan]*self.column_data[self.source].shape[0]
if 'AMON' in self.column_data[self.source]:
amon = self.get_float_list(key='AMON', ignore_qf=ignore_qf_list)
else:
amon = [np.nan]*self.column_data[self.source].shape[0]
return ntri, ntra, ntrz, amon
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerZoobenthos(DataFrameHandler):
"""
"""
def __init__(self, filter_path=u'',
export_directory='',
parameter_mapping=None,
mapping_objects = None):
super().__init__()
self.dtype = 'zoobenthos'
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
#==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
self._add_waterbody_area_info()
#self._add_obspoint()
#==========================================================================
def _rename_param(self, original_name, new_name):
"""
Renames rows whose parameter_key column equals original_name to new_name
:param original_name: the parameter name that should be changed
:param new_name: the new name for the parameter
:return:
"""
mapping_dict = self.parameter_mapping.get_parameter_mapping([self.filter_parameters.rename_parameter])
temp = self.df.loc[self.df[self.filter_parameters.parameter_key] == original_name,
self.filter_parameters.parameter_key]
self.df.loc[self.df[self.filter_parameters.parameter_key] == original_name,
self.filter_parameters.parameter_key] = new_name
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerChlorophyll(DataFrameHandler):
"""
"""
def __init__(self, filter_path=u'',
export_directory='',
parameter_mapping=None,
mapping_objects = None):
super().__init__()
self.dtype = 'chlorophyll' # Only Tube samples ?
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
#==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
self._add_waterbody_area_info()
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerPhytoplankton(DataFrameHandler):
"""
"""
def __init__(self, filter_path=u'',
export_directory='',
parameter_mapping=None,
mapping_objects = None):
super().__init__()
self.dtype = 'phytoplankton'
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
# ==========================================================================
def _additional_filter(self):
self._delete_columns_from_df(columns=self.filter_parameters.extra_fields + [self.filter_parameters.value_key])
self._drop_duplicates(based_on_column='SHARKID_MD5')
# TODO: check if this overwrites earlier info and, if so, why
self.filter_parameters.use_parameters = 'BIOV_CONC_ALL'
# ==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
self._add_waterbody_area_info()
# ==========================================================================
def _extended_filter_for_phytoplanton_data(self):
"""
Selects parameters and TROPHIC-status according to
self.filter_parameters
"""
self.df = utils.set_filter(df=self.df,
filter_dict={self.filter_parameters.parameter_key : self.para_list,
self.filter_parameters.trophic_key : self.filter_parameters.use_trophic},
return_dataframe=True)
#==========================================================================
def _get_total_biovolume(self, samp_key=''):
"""
Created: 2017 by Johannes Johansson
Modified: 20180320 by Lena Viktorsson (changes df.astype(np.float) to pd.to_numeric(df))
"""
# keys could be set in filter_parameters instead..
# print(self.df.get(samp_key).unique)
for sample in self.df.get(samp_key).unique():
boolean = utils.set_filter(df=self.df,
filter_dict={samp_key:sample})
#tot_value = self.df.loc[boolean,self.filter_parameters.value_key].astype(np.float).sum(skipna=True)
tot_value = pd.to_numeric(self.df.loc[boolean,self.filter_parameters.value_key]).sum(skipna=True)
self.df.loc[boolean, self.filter_parameters.add_parameters] = str(tot_value)
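# Example of the aggregation above (hypothetical sample): two rows sharing the same
# SHARKID_MD5 with value_key values '0.5' and '1.5' both get the add_parameters column
# (the total biovolume) set to the string '2.0'.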
#==========================================================================
def _select_parameters(self):
"""
Rewritten from parent-class
"""
#save aside and then delete .extra_fields
# save aside in column format
self._check_nr_of_parameters()
p_map, p_list = self._map_parameter_list()
self.para_list = self.parameter_mapping.map_parameter_list(p_list)
for para in p_list:
# Change parameter name according to parameter codelist
#TODO Check if this variant of np.where works with a pandas irregular index..
self.df[self.filter_parameters.parameter_key] = np.where(self.df[self.filter_parameters.parameter_key]==para,
p_map[para],
self.df[self.filter_parameters.parameter_key])
self._extended_filter_for_phytoplanton_data()
self._add_field()
self._get_total_biovolume(samp_key='SHARKID_MD5')
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class DataHandler(object):
"""
Class to hold data.
"""
#TODO method to check undefined data files: which datatype applies? Input from
#the user or from the data file.. If the datatype is not given in the file, tell the
#user.. Once the datatype is known, save the filename to file
#TODO check for duplicates
def __init__(self,
input_data_directory=None,
resource_directory=None,
mapping_objects=None,
wb_id_header=None):
# print(input_data_directory, resource_directory)
assert all([input_data_directory, resource_directory])
super().__init__()
# self.source = source
# self.column_data = pd.DataFrame()
# self.row_data = pd.DataFrame()
self.input_data_directory = input_data_directory
self.resource_directory = resource_directory
# TODO: Maybe WorkSpace should specify these too
self.raw_data_directory = self.input_data_directory + '/raw_data'
self.export_directory = self.input_data_directory + '/exports'
path_parameter_mapping = self.resource_directory + '/mappings/mapping_parameter_dynamic_extended.txt'
path_fields_filter = self.resource_directory + '/filters/'
self.mapping_objects = mapping_objects
self.wb_id_header = wb_id_header
# path_parameter_mapping = current_path + u'/test_data/mappings/mapping_parameter_dynamic_extended.txt'
# path_fields_filter = current_path + u'/test_data/filters/'
self._load_field_mapping(file_path=path_parameter_mapping)
# All datatypes that might include data for setting ecological status
self.all_datatypes = [u'chlorophyll',
u'physicalchemical',
u'physicalchemicalsatellite',
u'physicalchemicalmodel',
u'phytoplankton',
u'zoobenthos']
#TODO put the datatype objects in a dict? separate sources as keys... 'phyche_source' DONE!
self.chlorophyll = DataHandlerChlorophyll(filter_path=path_fields_filter+u'filter_fields_chlorophyll_integrated.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.physicalchemical = DataHandlerPhysicalChemical(filter_path=path_fields_filter+'filter_fields_physical_chemical.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.physicalchemicalsatellite = DataHandlerPhysicalChemicalSatellite(filter_path=path_fields_filter+'filter_fields_physical_chemical_satellite.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.physicalchemicalmodel = DataHandlerPhysicalChemicalModel(filter_path=path_fields_filter+'filter_fields_physical_chemical_model.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
no_qflags=True,
mapping_objects = self.mapping_objects)
self.phytoplankton = DataHandlerPhytoplankton(filter_path=path_fields_filter+u'filter_fields_phytoplankton.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.zoobenthos = DataHandlerZoobenthos(filter_path=path_fields_filter+'filter_fields_zoobenthos.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.float_parameters = []
for data_type in [self.__getattribute__(dtype) for dtype in self.all_datatypes]:#[self.chlorophyll, self.physicalchemical, self.physicalchemicalsatellite, self.physicalchemicalmodel, self.phytoplankton, self.zoobenthos]:
if isinstance(data_type.filter_parameters.use_parameters, str):
self.float_parameters = self.float_parameters + [data_type.filter_parameters.use_parameters]
else:
self.float_parameters = self.float_parameters + data_type.filter_parameters.use_parameters
# self.all_data = None
self.all_data = pd.DataFrame() # MW
#==========================================================================
def _load_field_mapping(self, file_path=u''):
"""
"""
self.parameter_mapping = core.ParameterMapping()
self.parameter_mapping.load_mapping_settings(file_path=file_path)
#==========================================================================
def add_df(self, pd_df, data_type, add_columns=False):
"""
Updated 20180828 by Magnus
Adds data to the internal data structure.
"""
# Add columns (time etc.)
# This is never used from here; should it be?
# This is called from filter_data & add_txt_file; there should be no columns added when filtering or adding a df
if add_columns:
self._add_columns(pd_df)
if 'col' in data_type:
self.column_data = self.column_data.append(pd_df, ignore_index=True)
# Remove duplicate rows
self.column_data.drop_duplicates(inplace=True) # MW: 20180828
elif 'row' in data_type:
self.row_data = self.row_data.append(pd_df, ignore_index=True).fillna('')
# Remove duplicate rows
self.row_data.drop_duplicates(inplace=True) # MW: 20180828
# print(self.data_phys_chem.head())
#==========================================================================
# def add_txt_file(self, file_path, data_type):
def add_txt_file(self, file_path, data_type, map_object=None):
data = pd.read_csv(file_path, sep='\t', encoding='cp1252')
if map_object is not None:
map_dict = map_object.get_parameter_mapping( data.columns.values )
data = self._rename_columns_of_DataFrame( data, map_dict )
self.add_df(data, data_type)
# TODO: Check if all is ok
# #==========================================================================
# def filter_data(self, data_filter_object, filter_id=''):
# """
# Filters data according to data_filter_object.
# data_filter_object is a core.filters.DataFilter-object.
# Returns a DataHandler object with the filtered data.
# """
# new_data_handler = DataHandler(self.source + '_filtered_%s' % filter_id)
# if len(self.column_data):
## print( 'data_filter_object', data_filter_object)
# df = self._filter_column_data(self.column_data, data_filter_object)
# if data_filter_object.parameter:
## print('df', df.columns)
## print('data_filter_object.parameter:', data_filter_object.parameter)
# for col in list(df.columns):
# if col not in core.ParameterList().metadata_list + [data_filter_object.parameter]:
# df = df.drop(col, 1)
# new_data_handler.add_df(df, 'column')
# if len(self.row_data):
# df = self._filter_row_data(self.row_data, data_filter_object)
# new_data_handler.add_df(df, 'row')
#
# return new_data_handler
# #==========================================================================
# def _filter_column_data(self, df, data_filter_object):
# """
# Filters column file data and returns resulting dataframe
# """
# #TODO kolla på flera DF ? annan struktur ?
# boolean = data_filter_object.get_column_data_boolean(df)
#
# if not len(boolean):
# return df
# return df.loc[df.index[boolean], :]
#==========================================================================
def get_all_column_data_df(self, boolean_filter=[]):
"""
mw
Returns a pandas dataframe that contains all data in column format.
boolean_filter is a pd.Series. If not given the whole df is returned.
"""
# TODO: what do we return when boolean_filter is False because no filter has been set for the given key?
if len(boolean_filter):
# TODO: Check length
return self.all_data.loc[boolean_filter, :]
else:
return self.all_data
#==========================================================================
def merge_all_data(self, save_to_txt=False):
"""
Created:
Last modified: 20180720 by Magnus Wenzer
- Do we need to sort all_data ?
- Merge data from different datatypes for the same visit ?
"""
self.all_data = pd.DataFrame()
# All datatypes that might include data for setting ecological status
# all_datatypes = [u'chlorophyll',
# u'physicalchemical',
# u'physicalchemicalsatellite',
# u'physicalchemicalmodel',
# u'phytoplankton',
# u'zoobenthos']
# TODO: where should we check mandatory keys? And where should they be read in?
mandatory_keys = []#['DEPH']
for dtype in self.all_datatypes:
if dtype in dir(self):
# print(dtype)
# print(self.__getattribute__(dtype).column_data)
# Appends dataframes from each datatype into one dataframe
for source in self.__getattribute__(dtype).column_data:
# Each datatype might have multiple sources..
# .column_data is a dict in each datatypes DataFrameHandler object
df = self.__getattribute__(dtype).column_data[source]
if not all([item in df.columns for item in mandatory_keys]):
raise exceptions.MissingKeyInData(message=os.path.basename(source))
if any(df.columns.duplicated()):
print('duplicates in data from source {}\nduplicate columns {}'.format(source, df.columns[df.columns.duplicated()]))
raise exceptions.MissingKeyInData(message=os.path.basename(source))
self.all_data = self.all_data.append(df,
ignore_index=True)
if not len(self.all_data):
print('No data available after "merge_all_data"!')
return False
# Save pkl-file for all_data_raw. Updated 20180525 by Magnus Wenzer
sld_object = core.SaveLoadDelete(self.export_directory)
sld_object.save_df(self.all_data, file_name='all_data_raw', force_save_txt=True, only_pkl=not save_to_txt)
# pickle.dump(self.all_data, open(self.export_directory + "/all_data_raw.pickle", "wb"))
# if save_to_txt:
# save_data_file(df=self.all_data,
# directory=self.export_directory,
# file_name='all_data.txt')
# Load data again. This way we can treat new and old
#"self.all_data" the same way
self.all_data = pd.DataFrame()
self.load_all_datatxt()
#==========================================================================
def load_datatypetxt(self, datatype, sep='\t', encoding='cp1252'):
"""
loads existing data files for the given datatype from export directory (from pickle if existing, otherwise from txt)
Created: 20180422 by Magnus Wenzer
Last modified: 20180422 by Magnus Wenzer
"""
# Column data file
try:
file_path = '{}/column_format_{}_data.pickle'.format(self.export_directory, datatype)
# pd_df = pickle.load(open(file_path, "rb"))
# self.add_df(pd_df, data_type) # here data_type is row or col
# TODO: should this really say self.column_data = ? It will then replace anything already in self.column_data with new content.
# self.column_data = pickle.load(open(file_path, "rb"))
self.__getattribute__(datatype).column_data = pickle.load(open(file_path, "rb"))
return True
except (OSError, IOError) as e:
return False
# try:
# file_path = '{}/column_format_{}_data.txt'.format(self.export_directory, datatype)
# self.column_data = load_data_file(file_path)
# except:
# return False
# # Raw data file
# file_path = '{}/raw_format_{}_data.txt'.format(self.export_directory, datatype)
# try:
# self.row_data = load_data_file(file_path)
# except (OSError, IOError) as e:
# return False
#
# return True
#==========================================================================
def load_all_datatxt(self, sep='\t', encoding='cp1252'):
"""
loads existing all_data file from export directory (from pickle if existing, otherwise from txt)
Created: 20180318 by Lena Viktorsson
Last modified: 20180525 by Magnus Wenzer
"""
def float_convert(x):
try:
return float(x)
except:
# print('float_convert')
return np.nan
def str_convert(x):
x = str(x)
if x == 'nan':
x = ''
return x
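# Illustration of the two converters above (hypothetical inputs):
#   float_convert('3.2') -> 3.2, float_convert('') -> np.nan, float_convert('<0.1') -> np.nan
#   str_convert(np.nan) -> '' (since str(np.nan) == 'nan'), str_convert('B') -> 'B'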
# print('self.all_data', len(self.all_data))
if len(self.all_data):
print('self.all_data length', len(self.all_data), 'continue to load all_data')
# return False, False
else:
sld_object = core.SaveLoadDelete(self.export_directory) # 20180525 by Magnus Wenzer
try:
self.all_data = sld_object.load_df('all_data', load_txt=False) # 20180525 by Magnus Wenzer
# print()
# with open(self.export_directory + "/all_data.pkl", "rb") as fid:
# self.all_data = pickle.load(fid)
filetype = 'pickle'
print('all_data loaded from pickle')
except (FileNotFoundError, UnboundLocalError) as e:
# UnboundLocalError is for when df was not created in sld_object.load_df()
print('setting up all_data from all_data_raw.pkl')
try:
self.all_data = sld_object.load_df('all_data_raw', load_txt=False) # 20180525 by Magnus Wenzer
# self.all_data = pickle.load(open(self.export_directory + "/all_data_raw.pickle", "rb"))
except (OSError, IOError) as e:
raise OSError('Raw data pickle file does not exist! It is created in "merge_all_data".')
# self.all_data = load_data_file(self.export_directory + '/all_data.txt')
# self.all_data = core.Load().load_txt(self.export_directory + '/all_data.txt', sep=sep, encoding=encoding, fill_nan=u'')
#TODO: better way to say which columns should be converted to float and int?
self.all_data['MONTH'] = self.all_data['SDATE'].apply(lambda x: int(x[5:7]))
self.all_data['YEAR'] = self.all_data['SDATE'].apply(lambda x: int(x[0:4]))
# try:
# self.all_data['MYEAR'] = self.all_data['MYEAR'].astype(int)
# except KeyError:
self.all_data['MYEAR'] = self.all_data['YEAR']
# self.all_data['YEAR'] = self.all_data['SDATE'].apply(lambda x: int(x[0:4])).astype(int)
# TODO: does not work when only datatypes without a DEPH column (for example zoobenthos) are loaded
self.all_data['DEPH'] = self.all_data['DEPH'].apply(lambda x: float(x) if x else np.nan)
self.all_data['POSITION'] = self.all_data.apply(lambda x: '{0:.2f}'.format(float_convert(x.LATIT_DD)) + '_' + '{0:.2f}'.format(float_convert(x.LONGI_DD)), axis = 1)
if 'STATN' not in self.all_data.columns:
self.all_data['STATN'] = self.all_data[self.wb_id_header]
statn = self.all_data.STATN.tolist()
pos = self.all_data.POSITION.tolist()
for i, x in enumerate(statn):
if x == "":
statn[i] = pos[i]
# set all station names to uppercase to limit number of synonyms
self.all_data['STATN'] = [s.upper() for s in statn]
if 'MNDEP' not in self.all_data.columns:
self.all_data['MNDEP'] = np.nan
self.all_data['MXDEP'] = np.nan
# MW: Add visit_id
# TODO: in all places where this is used change to use sample_id instead and remove this
self.all_data['visit_id_str'] = self.all_data[self.wb_id_header] + \
self.all_data['POSITION'] + \
self.all_data['SDATE'] + \
self.all_data['STIME']
for col in self.all_data.columns:
if col.startswith('Q_'):
par = col[2:]
self.all_data[par] = self.all_data[par].apply(float_convert)
self.all_data[col] = self.all_data[col].apply(str_convert)
# TODO: send info to user
elif col in ['DIN', 'CPHL_BTL', 'CPHL_SAT','WADEP', 'MNDEP', 'MXDEP']:
self.all_data[col] = self.all_data[col].apply(float_convert)
elif col in self.float_parameters:
self.all_data[col] = self.all_data[col].apply(float_convert)
elif self.wb_id_header == 'VISS_EU_CD' and col == self.wb_id_header:
self.all_data[col] = self.all_data[col].apply(lambda x: 'SE' + x if 'SE' not in x else x)
else:
pass
self.all_data['STIME'] = self.all_data['STIME'].apply(lambda x: x[:5])
# MW 20180716
# TODO: Speed up; the problem here is a few rows with day 00. Maybe find those, exclude them and then do pd.to_datetime
try:
self.all_data['date'] = pd.to_datetime(self.all_data['SDATE'])
except ValueError:
remove_index = []
for row_index in self.all_data.index:
try:
pd.to_datetime(self.all_data.iloc[row_index].SDATE)
except ValueError:
#self.all_data.loc[row_index, 'SDATE'] = ''
remove_index.append(row_index)
sld_object = core.SaveLoadDelete(self.export_directory)
sld_object.save_df(self.all_data.iloc[remove_index], 'removed__before_saving_all_data')
self.all_data.drop(remove_index, inplace = True)
self.all_data['date'] = pd.to_datetime(self.all_data['SDATE'])
# MW: Add prioritized salinity
self._add_prioritized_parameter('SALT', 'SALT_BTL', 'SALT_CTD')
# MW: Add prioritized temperature
self._add_prioritized_parameter('TEMP', 'TEMP_BTL', 'TEMP_CTD')
# MW: Add prioritized oxygen
self._add_prioritized_parameter('DOXY', 'DOXY_BTL', 'DOXY_CTD')
if 'CPHL_BTL' in self.all_data.columns:
# MW: Add integrated chlorophyll from CHPL_BTL
self._add_integrated_calc(use_par='CPHL_BTL',
new_par='CPHL_INTEG_CALC',
depth_interval=[0, 10],
exclude_qf=[u'?',u'B',u'S'],
min_nr_values=2)
self._add_waterbody_area_info()
sld_object.save_df(self.all_data, file_name='all_data', force_save_txt=True, only_pkl=False) # 20180525 by Magnus Wenzer
filetype = 'txt'
print('all_data loaded from txt and new parameters added')
return True, filetype
#==========================================================================
def _add_prioritized_parameter(self, new_par, primary_par, secondary_par, exclude_qf=['B', 'S']):
"""
Created: 20180413 by Magnus Wenzer
Last modified: 20180419 by Magnus Wenzer
Adds the parameter <new_par> by combining <primary_par> and <secondary_par>.
The first of these that does not have a quality flag listed in exclude_qf
is prioritized.
Three columns are added to self.all_data:
<new_par>
Q_<new_par>
source_<new_par>
"""
t0 = time.time()
primary_par_qf = 'Q_' + primary_par
secondary_par_qf = 'Q_' + secondary_par
q_new_par = 'Q_'+new_par
source_new_par = 'source_'+new_par
if not all(item in self.all_data.columns for item in
[primary_par, primary_par_qf, secondary_par, secondary_par_qf]):
if all(item in self.all_data.columns for item in [primary_par, secondary_par]):
print('both parameters {} and {} in data but no q_flags'.format(primary_par, secondary_par))
elif primary_par in self.all_data.columns and secondary_par not in self.all_data.columns:
self.all_data[new_par] = self.all_data[primary_par].copy()
self.all_data[source_new_par] = primary_par
return True
elif secondary_par in self.all_data.columns and primary_par not in self.all_data.columns:
self.all_data[new_par] = self.all_data[secondary_par].copy()
self.all_data[source_new_par] = secondary_par
return True
else:
return False
self.all_data[new_par] = np.nan
self.all_data[q_new_par] = ''
self.all_data[source_new_par] = ''
# Find where primary is valid
primary_valid = ~pd.isnull(self.all_data[primary_par]) & \
~self.all_data[primary_par_qf].isin(exclude_qf)
# Add where primary is valid
self.all_data.loc[primary_valid, new_par] = self.all_data.loc[primary_valid, primary_par]
self.all_data.loc[primary_valid, q_new_par] = self.all_data.loc[primary_valid, primary_par_qf]
self.all_data.loc[primary_valid, source_new_par] = primary_par
# Find where primary is not valid and secondary is
add_secondary_valid = ~pd.isnull(self.all_data[secondary_par]) & \
~self.all_data[secondary_par_qf].isin(exclude_qf) & \
~primary_valid
# Add where primary is not valid and secondary is
self.all_data.loc[add_secondary_valid, new_par] = self.all_data.loc[add_secondary_valid, secondary_par]
self.all_data.loc[add_secondary_valid, q_new_par] = self.all_data.loc[add_secondary_valid, secondary_par_qf]
self.all_data.loc[add_secondary_valid, source_new_par] = secondary_par
print('time for _add_prioritized_parameter {} is: {}'.format(new_par, time.time()-t0))
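# Example of the priority rule above (hypothetical row):
#   SALT_BTL = 7.2 with Q_SALT_BTL = 'B' (excluded) and SALT_CTD = 7.1 with Q_SALT_CTD = ''
#   -> SALT = 7.1, Q_SALT = '', source_SALT = 'SALT_CTD'
#   With Q_SALT_BTL = '' instead, SALT would be 7.2 and source_SALT 'SALT_BTL'.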
def _add_waterbody_area_info(self):
pass
# This is done in DataFrameHandler, but why not here?
#TODO:
# add: if MS_CD, VISS_EU_CD are not in df.columns, add them from the vfk code column
# wb_id_list = self.all_data[self.wb_id_header].tolist()
# # wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id_list[0])
# if 'WATER_DISTRICT_CODE' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id)
# new_list.append(wd_id)
# self.all_data['WATER_DISTRICT_CODE'] = new_list
# if 'WATER_DISTRICT_NAME' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# wd_name = self.mapping_objects['water_body'].get_waterdistrictname_for_water_body(wb_id)
# new_list.append(wd_name)
# self.all_data['WATER_DISTRICT_NAME'] = new_list
# if 'WATER_TYPE_AREA' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# type_name = self.mapping_objects['water_body'].get_type_area_name_for_water_body(wb_id)
# new_list.append(type_name)
# self.all_data['WATER_TYPE_AREA'] = new_list
# if 'WATER_BODY_NAME' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# wb_name = self.mapping_objects['water_body'].get_name_for_water_body(wb_id)
# new_list.append(wb_name)
# self.all_data['WATER_BODY_NAME'] = new_list
#===========================================================================
def get_exclude_index_array(self, df):
"""
Created: 20180423 by Magnus Wenzer
Last modified: 20180423 by Magnus Wenzer
"""
exclude_list = []
for col in df.columns:
if 'Q_' in col:
exclude_list.append(col[2:])
exclude_list.append(col)
elif 'source' in col:
exclude_list.append(col)
elif 'DIN' in col:
exclude_list.append(col)
elif 'DEPH' in col:
exclude_list.append(col)
exclude_index_list = [True if par in exclude_list else False for par in df.columns]
return np.array(exclude_index_list)
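# Example of the exclusion rules above (hypothetical column set):
#   df.columns = ['SALT', 'Q_SALT', 'source_SALT', 'DIN', 'DEPH', 'TEMP']
#   -> exclude_list = ['SALT', 'Q_SALT', 'source_SALT', 'DIN', 'DEPH']
#   -> returned array: [True, True, True, True, True, False]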
#===========================================================================
def _add_integrated_calc(self,
use_par=None,
new_par=None,
depth_interval=[0, 10],
exclude_qf=[u'?',u'B',u'S'],
min_nr_values=2):
"""
Created: 20180423 by Magnus Wenzer
Last modified: 20180423 by Magnus Wenzer
"""
#----------------------------------------------------------------------
def calculate(df):
if len(df) < min_nr_values:
#print(len(df))
return False
# Extract data lists
depth_list = list(df['DEPH'].values)
value_list = list(df[use_par].values)
t_calc_integ = time.time()
mean_value = utils.get_integrated_mean(depth_list,
value_list,
depth_interval)
time_list_calc_integ.append(time.time() - t_calc_integ)
t_add_row = time.time()
# Add info to row
new_row_series = df.loc[df.index[0], :].copy(deep=True)
new_row_series[new_par] = mean_value
new_row_series[new_par_depths] = ';'.join(map(str, depth_list))
new_row_series[new_par_values] = ';'.join(map(str, value_list))
new_row_series['MNDEP'] = depth_interval[0]
new_row_series['MXDEP'] = depth_interval[1]
#print('df.columns', len(df.columns))
#print(df.columns)
new_row = np.array(new_row_series)
# sets the other (with Q_flag, DIN and DEPH) parameters to nan
new_row[exclude_index_array] = np.nan
new_list_to_append.append(list(new_row))
time_list_add_row.append(time.time() - t_add_row)
return True
#----------------------------------------------------------------------
new_par_depths = new_par + '_depths'
new_par_values = new_par + '_values'
new_list_to_append = [] # list of lists with the new rows to be added to all_data once all calculations are done
# new_df = pd.DataFrame(columns=all_data.columns)
time_list_group_data = []
time_list_calc_integ = []
time_list_add_row = []
t_tot = time.time()
t_preparations = time.time()
# Add result columns
self.all_data[new_par] = np.nan
self.all_data[new_par_depths] = np.nan
self.all_data[new_par_values] = np.nan
exclude_index_array = self.get_exclude_index_array(self.all_data)
# print(len(exclude_index_array))
# print(len(all_data.columns))
# Narrow the data to only include lines where par is present and depth is in range
use_par_boolean = ~self.all_data[use_par].isnull()
depth_boolean = (self.all_data['DEPH'] >= depth_interval[0]) & \
(self.all_data['DEPH'] <= depth_interval[1])
active_boolean = use_par_boolean & depth_boolean
time_preparations = time.time() - t_preparations
t_group_data = time.time()
grouped_data = self.all_data.loc[active_boolean, :].groupby('visit_id_str')
time_list_group_data.append(time.time() - t_group_data)
t_iterator = time.time()
calculations = (calculate(group) for visit_id, group in grouped_data)
time_iterator = time.time() - t_iterator
t_all_calculation = time.time()
result = list(calculations)
time_all_calculation = time.time() - t_all_calculation
# Add new rows to self.all_data
t_add_data = time.time()
add_lines_df = pd.DataFrame(new_list_to_append, columns=self.all_data.columns)
self.all_data = self.all_data.append(add_lines_df)
self.all_data.reset_index(drop=True, inplace=True)
time_add_data = time.time() - t_add_data
time_total = time.time() - t_tot
print('-'*50)
print('Total time:', time_total)
print('time_preparations'.ljust(30), time_preparations)
print('time_list_group_data:'.ljust(30), sum(time_list_group_data))
print('time_list_calc_integ:'.ljust(30), sum(time_list_calc_integ))
print('time_list_add_row:'.ljust(30), sum(time_list_add_row))
print('time_all_calculations:'.ljust(30), time_all_calculation)
print('time_iterator:'.ljust(30), time_iterator)
print('time_add_data:'.ljust(30), time_add_data)
print('Done adding integrated_calc "{}" using parameter "{}"'.format(new_par, use_par))
print('time for integrated_calc "{}" using parameter "{}" is: {}'.format(new_par, use_par, time_total))
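# Worked example of the integrated mean (assuming utils.get_integrated_mean implements the
# trapezoidal scheme shown in the commented-out code of old_add_integrated_calc below):
#   depths = [0, 5, 10], values = [2.0, 4.0, 6.0], depth_interval = [0, 10]
#   segments: 0.5*(2+4)*5 = 15 and 0.5*(4+6)*5 = 25  ->  mean = (15 + 25) / 10 = 4.0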
#===========================================================================
def old_add_integrated_calc(self,
par,
new_par_name,
depth_interval=[0, 10],
exclude_qf=[u'?',u'B',u'S'],
min_nr_values=2):
"""
Created: 20180420 by Magnus Wenzer
Last modified: 20180420 by Magnus Wenzer
"""
def calculate(current_visit_id):
# print(current_visit_id)
visit_boolean = self.all_data['visit_id_str'] == current_visit_id
index = par_boolean & visit_boolean
# Extract data lists
depth_list = list(self.all_data.loc[index, 'DEPH'].values)
value_list = list(self.all_data.loc[index, par].values)
# Continue if not enough data to calculate
# if len(depth_list) < min_nr_values:
# return False
mean_value = utils.get_integrated_mean(depth_list,
value_list,
depth_interval)
new_row = []
for parameter, value in zip(self.all_data.columns, self.all_data.loc[visit_boolean,:].values[0]):
if parameter == 'MNDEP':
new_row.append(depth_interval[0])
elif parameter == 'MXDEP':
new_row.append(depth_interval[1])
elif parameter == new_par_name:
new_row.append(mean_value)
elif parameter == new_par_name_depth:
new_row.append(';'.join(map(str, depth_list)))
elif parameter == new_par_name_values:
new_row.append(';'.join(map(str, value_list)))
elif parameter in exclude_list:
new_row.append(np.nan)
else:
new_row.append(value)
# print(len(self.all_data)+1)
self.all_data.loc[max(self.all_data.index)+1, :] = new_row
return True
new_par_name_depth = new_par_name + '_depths'
new_par_name_values = new_par_name + '_values'
# Add new columns to dataframe
self.all_data[new_par_name] = np.nan
self.all_data[new_par_name_depth] = ''
self.all_data[new_par_name_values] = ''
# Check columns to exclude in row
exclude_list = []
for item in self.all_data.columns:
if item.startswith('Q_'):
exclude_list.append(item[2:])
exclude_list.append(item)
elif item.startswith('source_'):
exclude_list.append(item)
# Create boolean where par has values
par_boolean = ~self.all_data[par].isnull()
#----------------------------------------------------------------------
# Depth boolean to reduce nr of unique visits.
# This has to be removed/changed if halocline depth should be used
# instead of fixed depth interval.
# OBS! also used below!
depth_boolean = (self.all_data['DEPH'] >= depth_interval[0]) & \
(self.all_data['DEPH'] <= depth_interval[1])
par_boolean = par_boolean & depth_boolean
#----------------------------------------------------------------------
# Get list och unique visits
unique_visit_id_list = list(set(self.all_data.loc[par_boolean, 'visit_id_str']))
temp = list(map(calculate, unique_visit_id_list))
# return
## # Get next index in self.all_data . Increment this after adding new line to save time
## next_index = max(self.all_data.index) + 1
##
##
## #----------------------------------------------------------------------
## input_dict = {'current_visit_id': current_visit_id,
## }
##
##
## df_list = [by_year_pos.loc[by_year_pos.YEAR == year]['position_mean']]*n
## def bootstrap(df):
## return df.sample(frac = 1, replace = True).mean()
##
## BQIsim = map(bootstrap, df_list)
# #----------------------------------------------------------------------
#
#
#
#
# # Loop unique visits
# for k, current_visit_id in enumerate(unique_visit_id_list):
# if not k%100:
# print(k, current_visit_id)
## # Create boolen where par has values
## par_boolean = ~self.all_data[par].isnull()
##
## #----------------------------------------------------------------------
## # Depth boolean to reduce nr of unique visits.
## # This has to be removed/changed if halocline depth should be used
## # instead of fixed depth interval.
## # OBS! also used below!
## depth_boolean = (self.all_data['DEPH'] >= depth_interval[0]) & \
## (self.all_data['DEPH'] <= depth_interval[1])
## par_boolean = par_boolean & depth_boolean
## #----------------------------------------------------------------------
#
#
# visit_boolean = self.all_data['visit_id_str'] == current_visit_id
# index = par_boolean & visit_boolean
#
# # Extrac data lists
# depth_list = list(self.all_data.loc[index, 'DEPH'].values)
# value_list = list(self.all_data.loc[index, par].values)
#
# # Continue if not enough data to calculate
# if len(depth_list) < min_nr_values:
# continue
#
## #--------------------------------------------------------------
## par_boolean = ~self.all_data['CPHL_BTL'].isnull()
##
## depth_boolean = (self.all_data['DEPH'] >= depth_interval[0]) & \
## (self.all_data['DEPH'] <= depth_interval[1])
## par_boolean = par_boolean & depth_boolean
##
## visit_boolean = self.all_data['visit_id_str'] == '58.9113311.187502017-08-0111:40'
##
## print('='*50)
## print('1')
## print('='*50)
## print(self.all_data.loc[visit_boolean & par_boolean, ['index_column', 'DEPH', 'CPHL_BTL', 'Q_CPHL_BTL']])
## print('-'*50)
## #--------------------------------------------------------------
# #--------------------------------------------------------------
#
## print('='*50)
## print('2')
## print('='*50)
## print(self.all_data.loc[index, ['index_column', 'DEPH', 'CPHL_BTL', 'Q_CPHL_BTL']])
## print('-'*50)
## #--------------------------------------------------------------
##
##
## print('-'*50)
## print(current_visit_id)
## print(par)
## print(np.where(visit_boolean))
## print(np.where(visit_boolean))
## print(depth_list)
## print(value_list)
## print(depth_interval)
## print(len(self.all_data) )
## print(len(par_boolean))
#
# mean_value = utils.get_integrated_mean(depth_list,
# value_list,
# depth_interval)
#
# new_row = []
# for parameter, value in zip(self.all_data.columns, self.all_data.loc[visit_boolean,:].values[0]):
# if parameter == 'MNDEP':
# new_row.append(depth_interval[0])
# elif parameter == 'MXDEP':
# new_row.append(depth_interval[1])
# elif parameter == new_par_name:
# new_row.append(mean_value)
# elif parameter == new_par_name_depth:
# new_row.append(';'.join(map(str, depth_list)))
# elif parameter == new_par_name_values:
# new_row.append(';'.join(map(str, value_list)))
# elif parameter in exclude_list:
# new_row.append(np.nan)
# else:
# new_row.append(value)
#
# self.all_data.loc[next_index, :] = new_row
#
# next_index += 1
# class Calculations():
# def __init__(self):
# pass
#
# based_on_par_boolean = ~self.all_data[based_on_par].isnull()
#
#
#
#
# depths = self.get_float_array(u'DEPH', ignore_qf=exclude_qf)
# index = np.where((depths >= depth_interval[0]) & (depths <= depth_interval[-1]))[0]
# depths = depths[index]
# values = self.get_float_array(par, ignore_qf=exclude_qf)[index]
#
#
# # First remove empty values and nan
# missing_data_at_depth = []
# depth_list = []
# value_list = []
# for d, v in zip(depths, values):
# if not np.isnan(d) and not np.isnan(v):
# depth_list.append(d)
# value_list.append(v)
# else:
# missing_data_at_depth.append(d)
#
# sum_list = []
# if len(depth_list) >= min_nr_values:
# # Make sure to integrate the whole surface lager if selected
# if depth_list[0] != depth_interval[0]:
# depth_list.insert(0, depth_interval[0])
# value_list.insert(0, value_list[0])
# if depth_list[-1] != depth_interval[-1]:
# depth_list.append(depth_interval[-1])
# value_list.append(value_list[-1])
#
# for z0, z1, v0, v1 in zip(depth_list[:-1], depth_list[1:],
# value_list[:-1], value_list[1:]):
#
# part_sum = 0.5*(v1+v0)*(z1-z0)
#
# sum_list.append(part_sum)
#
# mean_value = sum(sum_list)/(depth_list[-1]-depth_list[0])
# else:
# if missing_value != None:
# mean_value = missing_value
# else:
# mean_value = np.nan
#
# calculations = Calculations()
# calculations.exclude_qf = exclude_qf
# calculations.min_nr_values = min_nr_values
# calculations.depth_interval = depth_interval
# calculations.used_values = [round(v, 2) for v in value_list]
# calculations.used_depths = depth_list
# calculations.nr_values_used = len(calculations.used_values)
# calculations.segments = sum_list
# calculations.missing_data_at_depth = missing_data_at_depth
# calculations.value = mean_value
#
# return calculations
#==========================================================================
def load_data(self, directory):
try:
column_file_path = directory + '/column_data.txt'
self.column_data = pd.read_csv(column_file_path, sep='\t', encoding='cp1252')
except:
pass
try:
row_file_path = directory + '/row_data.txt'
self.row_data = pd.read_csv(row_file_path, sep='\t', encoding='cp1252')
except:
pass
if __name__ == '__main__':
print('='*50)
print('Running module "data_handler.py"')
print('-'*50)
print('')
#
# raw_data_file_path = 'D:/Utveckling/g_EKOSTAT_tool/test_data/raw_data/data_BAS_2000-2009.txt'
# first_filter_directory = 'D:/Utveckling/g_EKOSTAT_tool/test_data/filtered_data'
# Handler
# raw_data = core.DataHandler('raw')
# raw_data.add_txt_file(raw_data_file_path, data_type='column')
#
print('-'*50)
print('done')
print('-'*50)
| mit |
mmottahedi/neuralnilm_prototype | scripts/e380.py | 2 | 6369 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
"""
e370
longer seq
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=128,
subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=True,
target_is_prediction=False,
independently_center_inputs=True,
standardise_input=True,
unit_variance_targets=True,
input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
500: 1e-4,
1500: 1e-5
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': tanh,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh
}
]
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
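# Hypothetical usage sketch (the real flow goes through init_experiment/eval in main() below,
# which presumably builds the equivalent call):
#   net = exp_a(NAME + 'a')   # e.g. full experiment name 'e380a'
#   run_experiment(net, epochs=None)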
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
olakiril/pipeline | python/pipeline/stack.py | 1 | 136839 | """ Schemas for structural stacks. """
import datajoint as dj
from datajoint.jobs import key_hash
import matplotlib.pyplot as plt
import numpy as np
import scanreader
from scipy import signal
from scipy import ndimage
from scipy import optimize
import itertools
from . import experiment, notify, shared, reso, meso
anatomy = dj.create_virtual_module('pipeline_anatomy','pipeline_anatomy')
from .utils import galvo_corrections, stitching, performance, enhancement
from .utils.signal import mirrconv, float2uint8
from .exceptions import PipelineException
""" Note on our coordinate system:
Our stack/motor coordinate system is consistent with numpy's: z in the first axis pointing
downwards, y in the second axis pointing towards you and x on the third axis pointing to
the right.
"""
dj.config['external-stack'] = {'protocol': 'file',
'location': '/mnt/dj-stor01/pipeline-externals'}
dj.config['cache'] = '/tmp/dj-cache'
schema = dj.schema('pipeline_stack', locals(), create_tables=False)
@schema
class StackInfo(dj.Imported):
definition = """ # master table with general data about the stacks
-> experiment.Stack
---
nrois : tinyint # number of ROIs
nchannels : tinyint # number of channels
fill_fraction : float # raster scan temporal fill fraction (see scanimage)
"""
class ROI(dj.Part):
definition = """ # 3-D volumes that compose this stack (usually tiled to form a bigger fov)
-> StackInfo
roi_id : tinyint # same as ScanImage's
---
-> experiment.Stack.Filename
field_ids : blob # list of field_ids (0-index) sorted from shallower to deeper
roi_z : float # (um) center of ROI in the motor coordinate system (cortex is at 0)
roi_y : float # (um) center of ROI in the motor coordinate system
roi_x : float # (um) center of ROI in the motor coordinate system
roi_px_depth : smallint # number of slices
roi_px_height : smallint # lines per frame
roi_px_width : smallint # pixels per line
roi_um_depth : float # depth in microns
roi_um_height : float # height in microns
roi_um_width : float # width in microns
nframes : smallint # number of recorded frames per plane
fps : float # (Hz) volumes per second
bidirectional : boolean # true = bidirectional scanning
is_slow : boolean # whether all frames in one depth were recorded before moving to the next
"""
def _make_tuples(self, key, stack, id_in_file):
# Create results tuple
tuple_ = key.copy()
# Get field_ids ordered from shallower to deeper field in this ROI
surf_z = (experiment.Stack() & key).fetch1('surf_depth') # surface depth in fastZ coordinates (meso) or motor coordinates (reso)
if stack.is_multiROI:
field_ids = [i for i, field_roi in enumerate(stack.field_rois) if
id_in_file in field_roi]
field_depths = [stack.field_depths[i] - surf_z for i in field_ids]
else:
field_ids = range(stack.num_scanning_depths)
motor_zero = surf_z - stack.motor_position_at_zero[2]
if stack.is_slow_stack and not stack.is_slow_stack_with_fastZ: # using motor
initial_fastZ = stack.initial_secondary_z or 0
field_depths = [motor_zero - stack.field_depths[i] + 2 * initial_fastZ
for i in field_ids]
else: # using fastZ
field_depths = [motor_zero + stack.field_depths[i] for i in field_ids]
field_depths, field_ids = zip(*sorted(zip(field_depths, field_ids)))
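# Example of the depth sort above (hypothetical): field_ids = (0, 1, 2) with
# field_depths = (50, -10, 20) becomes field_depths = (-10, 20, 50) and
# field_ids = (1, 2, 0), i.e. fields ordered from shallowest to deepest.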
tuple_['field_ids'] = field_ids
# Get reso/meso specific coordinates
x_zero, y_zero, _ = stack.motor_position_at_zero # motor x, y at ScanImage's 0
if stack.is_multiROI:
tuple_['roi_y'] = y_zero + stack._degrees_to_microns(stack.fields[
field_ids[0]].y)
tuple_['roi_x'] = x_zero + stack._degrees_to_microns(stack.fields[
field_ids[0]].x)
tuple_['roi_px_height'] = stack.field_heights[field_ids[0]]
tuple_['roi_px_width'] = stack.field_widths[field_ids[0]]
tuple_['roi_um_height'] = stack.field_heights_in_microns[field_ids[0]]
tuple_['roi_um_width'] = stack.field_widths_in_microns[field_ids[0]]
else:
tuple_['roi_y'] = y_zero
tuple_['roi_x'] = x_zero
tuple_['roi_px_height'] = stack.image_height
tuple_['roi_px_width'] = stack.image_width
# Estimate height and width in microns using measured FOVs for similar setups
fov_rel = (experiment.FOV() * experiment.Session() * experiment.Stack() &
key & 'session_date>=fov_ts')
zooms = fov_rel.fetch('mag').astype(np.float32) # zooms measured in same setup
closest_zoom = zooms[np.argmin(np.abs(np.log(zooms / stack.zoom)))]
dims = (fov_rel & 'ABS(mag - {}) < 1e-4'.format(closest_zoom)).fetch1(
'height', 'width')
um_height, um_width = [float(um) * (closest_zoom / stack.zoom) for um in
dims]
tuple_['roi_um_height'] = um_height * stack._y_angle_scale_factor
tuple_['roi_um_width'] = um_width * stack._x_angle_scale_factor
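# Worked example of the FOV scaling above (hypothetical numbers): with measured zooms
# [1.0, 2.0, 4.0] and stack.zoom = 2.4, the closest zoom (smallest absolute log ratio)
# is 2.0; a measured height of 400 um then gives um_height = 400 * (2.0 / 2.4) = 333.3 um
# (approx.) before the *_angle_scale_factor correction.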
# Get common parameters
z_step = field_depths[1] - field_depths[0]
tuple_['roi_z'] = field_depths[0] + (field_depths[-1] - field_depths[0]) / 2
tuple_['roi_px_depth'] = len(field_ids)
tuple_['roi_um_depth'] = field_depths[-1] - field_depths[0] + z_step
tuple_['nframes'] = stack.num_frames
tuple_['fps'] = stack.fps
tuple_['bidirectional'] = stack.is_bidirectional
tuple_['is_slow'] = stack.is_slow_stack
self.insert1(tuple_)
@property
def microns_per_pixel(self):
""" Returns an array with microns per pixel in depth, height and width. """
um_dims = self.fetch1('roi_um_depth', 'roi_um_height', 'roi_um_width')
px_dims = self.fetch1('roi_px_depth', 'roi_px_height', 'roi_px_width')
return np.array([um_dim / px_dim for um_dim, px_dim in zip(um_dims, px_dims)])
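# Illustration (hypothetical ROI): roi_um_depth/height/width = (300, 400, 400) um with
# roi_px_depth/height/width = (60, 512, 512) px gives
# microns_per_pixel = [5.0, 0.78125, 0.78125] (um per pixel in z, y, x).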
def _make_tuples(self, key):
""" Read and store stack information."""
print('Reading header...')
# Read files forming this stack
filename_keys = (experiment.Stack.Filename() & key).fetch(dj.key)
stacks = []
for filename_key in filename_keys:
stack_filename = (experiment.Stack.Filename() &
filename_key).local_filenames_as_wildcard
stacks.append(scanreader.read_scan(stack_filename))
num_rois_per_file = [(s.num_rois if s.is_multiROI else 1) for s in stacks]
# Create Stack tuple
tuple_ = key.copy()
tuple_['nrois'] = np.sum(num_rois_per_file)
tuple_['nchannels'] = stacks[0].num_channels
tuple_['fill_fraction'] = stacks[0].temporal_fill_fraction
# Insert Stack
self.insert1(tuple_)
# Insert ROIs
roi_id = 1
for filename_key, num_rois, stack in zip(filename_keys, num_rois_per_file,
stacks):
for roi_id_in_file in range(num_rois):
roi_key = {**key, **filename_key, 'roi_id': roi_id}
StackInfo.ROI()._make_tuples(roi_key, stack, roi_id_in_file)
roi_id += 1
# Fill in CorrectionChannel if only one channel
if stacks[0].num_channels == 1:
CorrectionChannel().fill(key)
@schema
class Quality(dj.Computed):
definition = """ # different quality metrics for a scan (before corrections)
-> StackInfo
"""
class MeanIntensity(dj.Part):
definition = """ # mean intensity per frame and slice
-> Quality
-> StackInfo.ROI
-> shared.Channel
---
intensities : longblob # num_slices x num_frames
"""
class SummaryFrames(dj.Part):
definition = """ # mean slice at 8 different depths
-> Quality
-> StackInfo.ROI
-> shared.Channel
---
summary : longblob # h x w x 8
"""
class Contrast(dj.Part):
definition = """ # difference between 99 and 1 percentile per frame and slice
-> Quality
-> StackInfo.ROI
-> shared.Channel
---
contrasts : longblob # num_slices x num_frames
"""
def _make_tuples(self, key):
print('Computing quality metrics for stack', key)
# Insert in Quality
self.insert1(key)
for roi_tuple in (StackInfo.ROI() & key).fetch():
# Load ROI
roi_filename = (experiment.Stack.Filename() &
roi_tuple).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
for channel in range((StackInfo() & key).fetch1('nchannels')):
# Map: Compute quality metrics in each field
f = performance.parallel_quality_stack # function to map
field_ids = roi_tuple['field_ids']
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=channel)
# Reduce: Collect results
mean_intensities = np.empty((roi_tuple['roi_px_depth'],
roi_tuple['nframes']))
contrasts = np.empty((roi_tuple['roi_px_depth'], roi_tuple['nframes']))
for field_idx, field_mis, field_contrasts, _ in results:
mean_intensities[field_idx] = field_mis
contrasts[field_idx] = field_contrasts
frames = [res[3] for res in sorted(results, key=lambda res: res[0])]
frames = np.stack(frames[:: int(len(frames) / 8)], axis=-1) # frames at 8 diff depths
# Insert
roi_key = {**key, 'roi_id': roi_tuple['roi_id'], 'channel': channel + 1}
self.MeanIntensity().insert1({**roi_key, 'intensities': mean_intensities})
self.Contrast().insert1({**roi_key, 'contrasts': contrasts})
self.SummaryFrames().insert1({**roi_key, 'summary': frames})
self.notify(roi_key, frames, mean_intensities, contrasts)
@notify.ignore_exceptions
def notify(self, key, summary_frames, mean_intensities, contrasts):
# Send summary frames
import imageio
video_filename = '/tmp/' + key_hash(key) + '.gif'
upper_percentile = np.percentile(summary_frames, 99.5)  # clip bright outliers at the 99.5th percentile
summary_frames = np.clip(summary_frames, None, upper_percentile)
summary_frames = float2uint8(summary_frames).transpose([2, 0, 1])
imageio.mimsave(video_filename, summary_frames, duration=0.4)
msg = ('summary frames for {animal_id}-{session}-{stack_idx} channel '
'{channel}').format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg)
# Send intensity and contrasts
figsize = (min(4, contrasts.shape[1] / 10 + 1), contrasts.shape[0] / 30 + 1) # set heuristically
fig, axes = plt.subplots(1, 2, figsize=figsize, sharex=True, sharey=True)
fig.tight_layout()
axes[0].set_title('Mean intensity', size='small')
axes[0].imshow(mean_intensities)
axes[0].set_ylabel('Slices')
axes[0].set_xlabel('Frames')
axes[1].set_title('Contrast (99 - 1 percentile)', size='small')
axes[1].imshow(contrasts)
axes[1].set_xlabel('Frames')
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('quality images for {animal_id}-{session}-{stack_idx} channel '
'{channel}').format(**key)
slack_user.notify(file=img_filename, file_title=msg)
@schema
class CorrectionChannel(dj.Manual):
definition = """ # channel to use for raster and motion correction
-> experiment.Stack
---
-> shared.Channel
"""
def fill(self, key, channel=1):
for stack_key in (StackInfo() & key).fetch(dj.key):
self.insert1({**stack_key, 'channel': channel}, ignore_extra_fields=True,
skip_duplicates=True)
@schema
class RasterCorrection(dj.Computed):
definition = """ # raster correction for bidirectional resonant scans
-> StackInfo.ROI # animal_id, session, stack_idx, roi_id, version
-> CorrectionChannel # animal_id, session, stack_idx
---
raster_phase : float # difference between expected and recorded scan angle
raster_std : float # standard deviation among raster phases in different slices
"""
def _make_tuples(self, key):
""" Compute raster phase discarding top and bottom 15% of slices and tapering
edges to avoid edge artifacts."""
print('Computing raster correction for ROI', key)
# Get some params
res = (StackInfo.ROI() & key).fetch1('bidirectional', 'roi_px_height',
'roi_px_width', 'field_ids')
is_bidirectional, image_height, image_width, field_ids = res
correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1
if is_bidirectional:
# Read the ROI
filename_rel = (experiment.Stack.Filename() & (StackInfo.ROI() & key))
roi_filename = filename_rel.local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Compute some parameters
skip_fields = max(1, int(round(len(field_ids) * 0.10)))
taper = np.sqrt(np.outer(signal.tukey(image_height, 0.4),
signal.tukey(image_width, 0.4)))
# Compute raster phase for each slice and take the median
raster_phases = []
for field_id in field_ids[skip_fields: -2 * skip_fields]:
# Create template (average frame tapered to avoid edge artifacts)
slice_ = roi[field_id, :, :, correction_channel, :].astype(np.float32,
copy=False)
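# The Anscombe transform 2 * sqrt(x + 3/8) approximately stabilizes the variance of
# Poisson-distributed photon counts, so noise contributes more evenly across intensity
# levels in the averaged template.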
anscombed = 2 * np.sqrt(slice_ - slice_.min(axis=(0, 1)) + 3 / 8) # anscombe transform
template = np.mean(anscombed, axis=-1) * taper
# Compute raster correction
raster_phases.append(galvo_corrections.compute_raster_phase(template,
roi.temporal_fill_fraction))
raster_phase = np.median(raster_phases)
raster_std = np.std(raster_phases)
else:
raster_phase = 0
raster_std = 0
# Insert
self.insert1({**key, 'raster_phase': raster_phase, 'raster_std': raster_std})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
msg = ('raster phase for {animal_id}-{session}-{stack_idx} roi {roi_id}: '
'{phase}').format(**key, phase=(self & key).fetch1('raster_phase'))
(notify.SlackUser() & (experiment.Session() & key)).notify(msg)
def correct(self, roi):
""" Correct roi with parameters extracted from self. In place.
:param np.array roi: ROI (fields, image_height, image_width, frames).
"""
raster_phase = self.fetch1('raster_phase')
fill_fraction = (StackInfo() & self).fetch1('fill_fraction')
if abs(raster_phase) < 1e-7:
corrected = roi.astype(np.float32, copy=False)
else:
corrected = roi  # modified in place
for i, field in enumerate(roi):
corrected[i] = galvo_corrections.correct_raster(field, raster_phase,
fill_fraction)
return corrected
@schema
class MotionCorrection(dj.Computed):
definition = """ # motion correction for each slice in the stack
-> RasterCorrection
---
y_shifts : longblob # y motion correction shifts (num_slices x num_frames)
x_shifts : longblob # x motion correction shifts (num_slices x num_frames)
"""
def _make_tuples(self, key):
""" Compute motion shifts to align frames over time and over slices."""
print('Computing motion correction for ROI', key)
# Get some params
res = (StackInfo.ROI() & key).fetch1('nframes', 'roi_px_height', 'roi_px_width',
'field_ids')
num_frames, image_height, image_width, field_ids = res
correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1
y_shifts = np.zeros([len(field_ids), num_frames])
x_shifts = np.zeros([len(field_ids), num_frames])
if num_frames > 1:
# Read the ROI
filename_rel = (experiment.Stack.Filename() & (StackInfo.ROI() & key))
roi_filename = filename_rel.local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Compute some params
skip_rows = int(round(image_height * 0.10))
skip_cols = int(round(image_width * 0.10))
# Map: Compute shifts in parallel
f = performance.parallel_motion_stack # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (StackInfo() & key).fetch1('fill_fraction')
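# Cap motion shifts at ~20 um in y and x (converted to pixels below via microns_per_pixel)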
max_y_shift, max_x_shift = 20 / (StackInfo.ROI() & key).microns_per_pixel[1:]
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=correction_channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'skip_rows': skip_rows,
'skip_cols': skip_cols,
'max_y_shift': max_y_shift,
'max_x_shift': max_x_shift})
# Reduce: Collect results
for field_idx, y_shift, x_shift in results:
y_shifts[field_idx] = y_shift
x_shifts[field_idx] = x_shift
# Insert
self.insert1({**key, 'y_shifts': y_shifts, 'x_shifts': x_shifts})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
fps, is_slow_stack = (StackInfo.ROI() & key).fetch1('fps', 'is_slow')
num_slices, num_frames = y_shifts.shape
fps = fps * (num_slices if is_slow_stack else 1)
seconds = np.arange(num_frames) / fps
fig, axes = plt.subplots(2, 1, figsize=(13, 10), sharex=True, sharey=True)
axes[0].set_title('Shifts in y for all slices')
axes[0].set_ylabel('Pixels')
axes[0].plot(seconds, y_shifts.T)
axes[1].set_title('Shifts in x for all slices')
axes[1].set_ylabel('Pixels')
axes[1].set_xlabel('Seconds')
axes[1].plot(seconds, x_shifts.T)
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename)
plt.close(fig)
msg = 'motion shifts for {animal_id}-{session}-{stack_idx} roi {roi_id}'.format(
**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def save_as_tiff(self, filename='roi.tif', channel=1):
""" Correct roi and save as a tiff file.
:param int channel: What channel to use. Starts at 1
"""
from tifffile import imsave
# Get some params
res = (StackInfo.ROI() & self).fetch1('field_ids', 'roi_px_depth',
'roi_px_height', 'roi_px_width')
field_ids, px_depth, px_height, px_width = res
# Load ROI
roi_filename = (experiment.Stack.Filename() & self).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Map: Apply corrections to each field in parallel
f = performance.parallel_correct_stack # function to map
raster_phase = (RasterCorrection() & self).fetch1('raster_phase')
fill_fraction = (StackInfo() & self).fetch1('fill_fraction')
y_shifts, x_shifts = self.fetch1('y_shifts', 'x_shifts')
results = performance.map_fields(f, roi, field_ids=field_ids, channel=channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'y_shifts': y_shifts,
'x_shifts': x_shifts})
# Reduce: Collect results
corrected_roi = np.empty((px_depth, px_height, px_width), dtype=np.float32)
for field_idx, corrected_field in results:
corrected_roi[field_idx] = corrected_field
print('Saving file at:', filename)
imsave(filename, corrected_roi)
@schema
class Stitching(dj.Computed):
definition = """ # stitches together overlapping rois
-> StackInfo
"""
@property
def key_source(self):
return StackInfo() - (StackInfo.ROI() - MotionCorrection()) # run iff all ROIs have been processed
class Volume(dj.Part):
definition = """ # union of ROIs from a stack (usually one volume per stack)
-> Stitching
volume_id : tinyint # id of this volume
"""
class ROICoordinates(dj.Part):
definition = """ # coordinates for each ROI in the stitched volume
-> Stitching # animal_id, session, stack_idx, version
-> MotionCorrection # animal_id, session, stack_idx, version, roi_id
---
-> Stitching.Volume # volume to which this ROI belongs
stitch_ys : blob # (px) center of each slice in a volume-wise coordinate system
stitch_xs : blob # (px) center of each slice in a volume-wise coordinate system
"""
def _make_tuples(self, key):
""" Stitch overlapping ROIs together and correct slice-to-slice alignment.
Iteratively stitches two overlapping ROIs if the overlapping dimension has the
same length (up to some relative tolerance). Stitching params are calculated per
slice.
Edge case: when two overlapping ROIs have different px/micron resolutions they
won't be joined even if their true heights are the same (because their pixel
heights will not match); conversely, pixel heights could happen to match even
though the true heights differ, in which case they would be erroneously stitched.
"""
print('Stitching ROIs for stack', key)
# Get some params
correction_channel = (CorrectionChannel() & key).fetch1('channel') - 1
# Read and correct ROIs forming this stack
print('Correcting ROIs...')
rois = []
for roi_tuple in (StackInfo.ROI() & key).fetch():
# Load ROI
roi_filename = (experiment.Stack.Filename() &
roi_tuple).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Map: Apply corrections to each field in parallel
f = performance.parallel_correct_stack # function to map
raster_phase = (RasterCorrection() & roi_tuple).fetch1('raster_phase')
fill_fraction = (StackInfo() & roi_tuple).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & roi_tuple).fetch1('y_shifts',
'x_shifts')
field_ids = roi_tuple['field_ids']
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=correction_channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'y_shifts': y_shifts,
'x_shifts': x_shifts,
'apply_anscombe': True})
# Reduce: Collect results
corrected_roi = np.empty((roi_tuple['roi_px_depth'],
roi_tuple['roi_px_height'],
roi_tuple['roi_px_width']), dtype=np.float32)
for field_idx, corrected_field in results:
corrected_roi[field_idx] = corrected_field
# Create ROI object
um_per_px = (StackInfo.ROI() & (StackInfo.ROI().proj() &
roi_tuple)).microns_per_pixel
px_z, px_y, px_x = np.array([roi_tuple['roi_{}'.format(dim)] for dim in
['z', 'y', 'x']]) / um_per_px
rois.append(stitching.StitchedROI(corrected_roi, x=px_x, y=px_y, z=px_z,
id_=roi_tuple['roi_id']))
def enhance(image, sigmas):
""" Enhance 2p image. See enhancement.py for details."""
return enhancement.sharpen_2pimage(enhancement.lcn(image, sigmas))
def join_rows(rois_):
""" Iteratively join all rois that overlap in the same row."""
sorted_rois = sorted(rois_, key=lambda roi: (roi.x, roi.y))
prev_num_rois = float('inf')
while len(sorted_rois) < prev_num_rois:
prev_num_rois = len(sorted_rois)
for left, right in itertools.combinations(sorted_rois, 2):
if left.is_aside_to(right):
roi_key = {**key, 'roi_id': left.roi_coordinates[0].id}
um_per_px = (StackInfo.ROI() & roi_key).microns_per_pixel
# Compute stitching shifts
neighborhood_size = 25 / um_per_px[1:]
left_ys, left_xs = [], []
for l, r in zip(left.slices, right.slices):
left_slice = enhance(l.slice, neighborhood_size)
right_slice = enhance(r.slice, neighborhood_size)
delta_y, delta_x = stitching.linear_stitch(left_slice,
right_slice,
r.x - l.x)
left_ys.append(r.y - delta_y)
left_xs.append(r.x - delta_x)
# Fix outliers
max_y_shift, max_x_shift = 10 / um_per_px[1:]
left_ys, left_xs, _ = galvo_corrections.fix_outliers(
np.array(left_ys), np.array(left_xs), max_y_shift,
max_x_shift, method='linear')
# Stitch together
right.join_with(left, left_xs, left_ys)
sorted_rois.remove(left)
break # restart joining
return sorted_rois
# Stitch overlapping rois recursively
print('Computing stitching parameters...')
prev_num_rois = float('Inf') # to enter the loop at least once
while len(rois) < prev_num_rois:
prev_num_rois = len(rois)
# Join rows
rois = join_rows(rois)
# Join columns
[roi.rot90() for roi in rois]
rois = join_rows(rois)
[roi.rot270() for roi in rois]
# Compute slice-to-slice alignment
print('Computing slice-to-slice alignment...')
for roi in rois:
big_volume = roi.volume
num_slices, image_height, image_width = big_volume.shape
roi_key = {**key, 'roi_id': roi.roi_coordinates[0].id}
um_per_px = (StackInfo.ROI() & roi_key).microns_per_pixel
# Enhance
neighborhood_size = 25 / um_per_px[1:]
for i in range(num_slices):
big_volume[i] = enhance(big_volume[i], neighborhood_size)
# Drop 10% of the image borders
skip_rows = int(round(image_height * 0.1))
skip_columns = int(round(image_width * 0.1))
big_volume = big_volume[:, skip_rows:-skip_rows, skip_columns: -skip_columns]
y_aligns = np.zeros(num_slices)
x_aligns = np.zeros(num_slices)
for i in range(1, num_slices):
# Align current slice to previous one
y_aligns[i], x_aligns[i] = galvo_corrections.compute_motion_shifts(
big_volume[i], big_volume[i - 1], in_place=False)
# Fix outliers
max_y_shift, max_x_shift = 15 / um_per_px[1:]
y_fixed, x_fixed, _ = galvo_corrections.fix_outliers(y_aligns, x_aligns,
max_y_shift, max_x_shift)
# Accumulate shifts: shift i is the shift at slice i-1 plus the shift needed to align slice i to i-1
y_cumsum, x_cumsum = np.cumsum(y_fixed), np.cumsum(x_fixed)
# Detrend to discard influence of vessels going through the slices
filter_size = int(round(60 / um_per_px[0])) # 60 microns in z
filter_size += 1 if filter_size % 2 == 0 else 0
if len(y_cumsum) > filter_size:
smoothing_filter = signal.hann(filter_size)
smoothing_filter /= sum(smoothing_filter)
y_detrend = y_cumsum - mirrconv(y_cumsum, smoothing_filter)
x_detrend = x_cumsum - mirrconv(x_cumsum, smoothing_filter)
else:
y_detrend = y_cumsum - y_cumsum.mean()
x_detrend = x_cumsum - x_cumsum.mean()
# Apply alignment shifts in roi
for slice_, y_align, x_align in zip(roi.slices, y_detrend, x_detrend):
slice_.y -= y_align
slice_.x -= x_align
for roi_coord in roi.roi_coordinates:
roi_coord.ys = [prev_y - y_align for prev_y, y_align in zip(roi_coord.ys,
y_detrend)]
roi_coord.xs = [prev_x - x_align for prev_x, x_align in zip(roi_coord.xs,
x_detrend)]
# Insert in Stitching
print('Inserting...')
self.insert1(key)
# Insert each stitched volume
for volume_id, roi in enumerate(rois, start=1):
self.Volume().insert1({**key, 'volume_id': volume_id})
# Insert coordinates of each ROI forming this volume
for roi_coord in roi.roi_coordinates:
tuple_ = {**key, 'roi_id': roi_coord.id, 'volume_id': volume_id,
'stitch_xs': roi_coord.xs, 'stitch_ys': roi_coord.ys}
self.ROICoordinates().insert1(tuple_)
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
slack_user = (notify.SlackUser() & (experiment.Session() & key))
for volume_key in (self.Volume() & key).fetch('KEY'):
for roi_coord in (self.ROICoordinates() & volume_key).fetch(as_dict=True):
center_z, num_slices, um_depth = (StackInfo.ROI() & roi_coord).fetch1(
'roi_z', 'roi_px_depth', 'roi_um_depth')
first_z = center_z - um_depth / 2 + (um_depth / num_slices) / 2
depths = first_z + (um_depth / num_slices) * np.arange(num_slices)
fig, axes = plt.subplots(2, 1, figsize=(15, 8), sharex=True)
axes[0].set_title('Center position (x)')
axes[0].plot(depths, roi_coord['stitch_xs'])
axes[1].set_title('Center position (y)')
axes[1].plot(depths, roi_coord['stitch_ys'])
axes[0].set_ylabel('Pixels')
axes[0].set_xlabel('Depths')
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('stitch traces for {animal_id}-{session}-{stack_idx} volume '
'{volume_id} roi {roi_id}').format(**roi_coord)
slack_user.notify(file=img_filename, file_title=msg)
@schema
class CorrectedStack(dj.Computed):
definition = """ # all slices of each stack after corrections.
-> Stitching.Volume # animal_id, session, stack_idx, volume_id
---
z : float # (um) center of volume in the motor coordinate system (cortex is at 0)
y : float # (um) center of volume in the motor coordinate system
x : float # (um) center of volume in the motor coordinate system
px_depth : smallint # number of slices
px_height : smallint # lines per frame
px_width : smallint # pixels per line
um_depth : float # depth in microns
um_height : float # height in microns
um_width : float # width in microns
surf_z : float # (um) depth of first slice - half a z step (cortex is at z=0)
"""
class Slice(dj.Part):
definition = """ # single slice of one stack
-> CorrectedStack
-> shared.Channel
islice : smallint # index of slice in volume
---
slice : longblob # image (height x width)
"""
def _make_tuples(self, key):
print('Correcting stack', key)
for channel in range((StackInfo() & key).fetch1('nchannels')):
# Correct ROIs
rois = []
for roi_tuple in (StackInfo.ROI() * Stitching.ROICoordinates() & key).fetch():
# Load ROI
roi_filename = (experiment.Stack.Filename() &
roi_tuple).local_filenames_as_wildcard
roi = scanreader.read_scan(roi_filename)
# Map: Apply corrections to each field in parallel
f = performance.parallel_correct_stack # function to map
raster_phase = (RasterCorrection() & roi_tuple).fetch1('raster_phase')
fill_fraction = (StackInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & roi_tuple).fetch1('y_shifts',
'x_shifts')
field_ids = roi_tuple['field_ids']
results = performance.map_fields(f, roi, field_ids=field_ids,
channel=channel,
kwargs={'raster_phase': raster_phase,
'fill_fraction': fill_fraction,
'y_shifts': y_shifts,
'x_shifts': x_shifts})
# Reduce: Collect results
corrected_roi = np.empty((roi_tuple['roi_px_depth'],
roi_tuple['roi_px_height'],
roi_tuple['roi_px_width']), dtype=np.float32)
for field_idx, corrected_field in results:
corrected_roi[field_idx] = corrected_field
# Create ROI object (with pixel x, y, z coordinates)
px_z = roi_tuple['roi_z'] * (roi_tuple['roi_px_depth'] /
roi_tuple['roi_um_depth'])
ys = list(roi_tuple['stitch_ys'])
xs = list(roi_tuple['stitch_xs'])
rois.append(stitching.StitchedROI(corrected_roi, x=xs, y=ys, z=px_z,
id_=roi_tuple['roi_id']))
def join_rows(rois_):
""" Iteratively join all rois that overlap in the same row."""
sorted_rois = sorted(rois_, key=lambda roi: (roi.x, roi.y))
prev_num_rois = float('inf')
while len(sorted_rois) < prev_num_rois:
prev_num_rois = len(sorted_rois)
for left, right in itertools.combinations(sorted_rois, 2):
if left.is_aside_to(right):
left_xs = [s.x for s in left.slices]
left_ys = [s.y for s in left.slices]
right.join_with(left, left_xs, left_ys)
sorted_rois.remove(left)
break # restart joining
return sorted_rois
# Stitch all rois together. This is convoluted because smooth blending in
# join_with assumes rois are next to (not below or atop of) each other
prev_num_rois = float('Inf') # to enter the loop at least once
while len(rois) < prev_num_rois:
prev_num_rois = len(rois)
# Join rows
rois = join_rows(rois)
# Join columns
[roi.rot90() for roi in rois]
rois = join_rows(rois)
[roi.rot270() for roi in rois]
# Check stitching went alright
if len(rois) > 1:
msg = 'ROIs for volume {} could not be stitched properly'.format(key)
raise PipelineException(msg)
stitched = rois[0]
# Insert in CorrectedStack
roi_info = StackInfo.ROI() & key & {'roi_id': stitched.roi_coordinates[0].id}
um_per_px = roi_info.microns_per_pixel
tuple_ = key.copy()
tuple_['z'] = stitched.z * um_per_px[0]
tuple_['y'] = stitched.y * um_per_px[1]
tuple_['x'] = stitched.x * um_per_px[2]
tuple_['px_depth'] = stitched.depth
tuple_['px_height'] = stitched.height
tuple_['px_width'] = stitched.width
tuple_['um_depth'] = roi_info.fetch1('roi_um_depth') # same as original rois
tuple_['um_height'] = stitched.height * um_per_px[1]
tuple_['um_width'] = stitched.width * um_per_px[2]
tuple_['surf_z'] = (stitched.z - stitched.depth / 2) * um_per_px[0]
self.insert1(tuple_, skip_duplicates=True)
# Insert each slice
for i, slice_ in enumerate(stitched.volume):
self.Slice().insert1({**key, 'channel': channel + 1, 'islice': i + 1,
'slice': slice_})
self.notify({**key, 'channel': channel + 1})
@notify.ignore_exceptions
def notify(self, key):
import imageio
volume = (self & key).get_stack(channel=key['channel'])
volume = volume[:: int(volume.shape[0] / 8)] # volume at 8 diff depths
video_filename = '/tmp/' + key_hash(key) + '.gif'
imageio.mimsave(video_filename, float2uint8(volume), duration=1)
msg = ('corrected stack for {animal_id}-{session}-{stack_idx} volume {volume_id} '
'channel {channel}').format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg,
channel='#pipeline_quality')
@property
def microns_per_pixel(self):
""" Returns an array with microns per pixel in depth, height and width. """
um_dims = self.fetch1('um_depth', 'um_height', 'um_width')
px_dims = self.fetch1('px_depth', 'px_height', 'px_width')
return np.array([um_dim / px_dim for um_dim, px_dim in zip(um_dims, px_dims)])
def get_stack(self, channel=1):
""" Get full stack (num_slices, height, width).
:param int channel: What channel to use. Starts at 1
:returns The stack: a (num_slices, image_height, image_width) array.
:rtype: np.array (float32)
"""
slice_rel = (CorrectedStack.Slice() & self & {'channel': channel})
slices = slice_rel.fetch('slice', order_by='islice')
return np.stack(slices)
def save_as_tiff(self, filename='stack.tif'):
""" Save current stack as a tiff file."""
from tifffile import imsave
# Create a composite interleaving channels
height, width, depth = self.fetch1('px_height', 'px_width', 'px_depth')
num_channels = (StackInfo() & self).fetch1('nchannels')
composite = np.zeros([num_channels * depth, height, width], dtype=np.float32)
for i in range(num_channels):
composite[i::num_channels] = self.get_stack(i + 1)
# Save
print('Saving file at:', filename)
imsave(filename, composite)
def save_video(self, filename='stack.mp4', channel=1, fps=10, dpi=250):
""" Creates an animation video showing a fly-over of the stack (top to bottom).
:param string filename: Output filename (path + filename)
:param int channel: What channel to use. Starts at 1
:param int fps: Number of slices shown per second.
:param int dpi: Dots per inch, controls the quality of the video.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from matplotlib import animation
stack = self.get_stack(channel=channel)
num_slices = stack.shape[0]
fig, axes = plt.subplots(1, 1, sharex=True, sharey=True)
im = fig.gca().imshow(stack[int(num_slices / 2)])
video = animation.FuncAnimation(fig, lambda i: im.set_data(stack[i]), num_slices,
interval=1000 / fps)
fig.tight_layout()
if not filename.endswith('.mp4'):
filename += '.mp4'
print('Saving video at:', filename)
print('If this takes too long, stop it and call again with dpi <', dpi,
'(default)')
video.save(filename, dpi=dpi)
return fig
@schema
class PreprocessedStack(dj.Computed):
definition = """ # Resize to 1 um^3, apply local contrast normalization and sharpen
-> CorrectedStack
-> shared.Channel
---
resized: external-stack # original stack resized to 1 um^3
lcned: external-stack # local contrast normalized stack. Filter size: (3, 25, 25)
sharpened: external-stack # sharpened stack. Filter size: 1
"""
@property
def key_source(self):
# restrict each stack to its channels
return (CorrectedStack * shared.Channel).proj() & CorrectedStack.Slice.proj()
def make(self, key):
from .utils import registration
from .utils import enhancement
# Load stack
stack = (CorrectedStack() & key).get_stack(key['channel'])
# Resize to be 1 um^3
um_sizes = (CorrectedStack & key).fetch1('um_depth', 'um_height', 'um_width')
resized = registration.resize(stack, um_sizes, desired_res=1)
# Enhance
lcned = enhancement.lcn(resized, (3, 25, 25))
# Sharpen
sharpened = enhancement.sharpen_2pimage(lcned, 1)
# Insert
self.insert1({**key, 'resized': resized, 'lcned': lcned, 'sharpened': sharpened})
@schema
class Surface(dj.Computed):
definition = """ # Calculated surface of the brain
-> PreprocessedStack
-> shared.SurfaceMethod
---
guessed_points : longblob # Array of guessed depths stored in (z,y,x) format
surface_im : longblob # Matrix of fitted depth for each pixel in stack. Value is number of pixels to surface from top of array.
lower_bound_im : longblob # Lower bound of the confidence interval (5th percentile of depth across subsampled fits)
upper_bound_im : longblob # Upper bound of the confidence interval (95th percentile of depth across subsampled fits)
"""
def make(self, key):
# WARNINGS
# - This code assumes the surface will be in the top half of the stack
# - Only the top half of z-values are analyzed
# - Points along the edge are dropped to avoid errors due to blank space left by stack registration
# - This code assumes the surface median intensity should be in the bottom 60% of the range of values over z
# - e.g. if intensities range from 10 to 20, surface points must have an intensity < 0.6*(20-10) + 10 = 17.5
# - This is within the 2r x 2r window being analyzed
# - This code drops any 2r x 2r field where the first median value is above the 30th-percentile of the whole stack.
# - Windows where the final median intensity is below 10 are removed
# - Attempts to replace this with a percentile all fail
# - This code drops guessed depths > 95th-percentile and < 5th-percentile to be more robust to outliers
valid_method_ids = [1] # Used to check if method is implemented
# SETTINGS
# Note: Initial parameters for fitting are set further down
r = 50 # Radius of square in pixels
upper_threshold_percent = 0.6 # Surface median intensity should be in the bottom X% of the *range* of medians
gaussian_blur_size = 5 # Size of gaussian blur applied to slice
min_points_allowed = 10 # If there are fewer than X points after filtering, throw an error
bounds = ([0, 0, np.NINF, np.NINF, np.NINF], [np.Inf, np.Inf, np.Inf, np.Inf, np.Inf]) # Bounds for paraboloid fit
ss_percent = 0.40 # Fraction of points to subsample for the robustness check
num_iterations = 1000 # Number of iterations to use for robustness check
# DEFINITIONS
def surface_eqn(data, a, b, c, d, f):
x, y = data
return a * x ** 2 + b * y ** 2 + c * x + d * y + f
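# i.e. a quadratic surface z = a*x**2 + b*y**2 + c*x + d*y + f fitted below to the guessed surface points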
# MAIN BODY
if int(key['surface_method_id']) not in valid_method_ids:
raise PipelineException(f'Error: surface_method_id {key["surface_method_id"]} is not implemented')
print('Calculating surface of brain for stack', key)
full_stack = (PreprocessedStack & key).fetch1('resized')
depth, height, width = full_stack.shape
surface_guess_map = []
r_xs = np.arange(r, width - width % r, r * 2)[1:-1]
r_ys = np.arange(r, height - height % r, r * 2)[1:-1]
full_mesh_x, full_mesh_y = np.meshgrid(np.arange(width), np.arange(height))
# Surface z should be below this value
z_lim = int(depth / 2)
# Mean intensity of the first frame in the slice should be less than this value
z_0_upper_threshold = np.percentile(full_stack, 30)
for x in r_xs:
for y in r_ys:
stack_slice_medians = np.percentile(full_stack[0:z_lim, y - r:y + r, x - r:x + r], 50, axis=(1, 2))
blurred_slice = ndimage.gaussian_filter1d(stack_slice_medians, gaussian_blur_size)
upper_threshold_value = (upper_threshold_percent *
(blurred_slice.max() - blurred_slice.min()) + blurred_slice.min())  # min + 0.6 * range, per the example above
upper_threshold_idx = np.where(blurred_slice > upper_threshold_value)[0][0]
stack_slice_derivative = ndimage.sobel(blurred_slice)
surface_z = np.argmax(stack_slice_derivative)
if ((surface_z < upper_threshold_idx) and (blurred_slice[0] < z_0_upper_threshold) and
(blurred_slice[-1] > 10)):
surface_guess_map.append((surface_z, y, x))
if len(surface_guess_map) < min_points_allowed:
raise PipelineException(f"Surface calculation could not find enough valid points for {key}. Only "
f"{len(surface_guess_map)} detected")
# Drop the z-values lower than 5th-percentile or greater than 95th-percentile
arr = np.array(surface_guess_map)
top = np.percentile(arr[:, 0], 95)
bot = np.percentile(arr[:, 0], 5)
surface_guess_map = arr[np.logical_and(arr[:, 0] > bot, arr[:, 0] < top)]
# Guess for initial parameters
initial = [1, 1, int(width / 2), int(height / 2), 1]
popt, pcov = optimize.curve_fit(surface_eqn, (surface_guess_map[:, 2], surface_guess_map[:, 1]),
surface_guess_map[:, 0], p0=initial, maxfev=10000, bounds=bounds)
calculated_surface_map = surface_eqn((full_mesh_x, full_mesh_y), *popt)
all_sub_fitted_z = np.zeros((num_iterations, height, width))
for i in np.arange(num_iterations):
indices = np.random.choice(surface_guess_map.shape[0], int(surface_guess_map.shape[0] * ss_percent),
replace=False)
subsample = surface_guess_map[indices]
sub_popt, sub_pcov = optimize.curve_fit(surface_eqn, (subsample[:, 2], subsample[:, 1]), subsample[:, 0],
p0=initial, maxfev=10000, bounds=bounds)
all_sub_fitted_z[i, :, :] = surface_eqn((full_mesh_x, full_mesh_y), *sub_popt)
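# The 5th/95th percentiles across these subsampled fits form a per-pixel confidence band for the surface depth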
z_min_matrix = np.percentile(all_sub_fitted_z, 5, axis=0)
z_max_matrix = np.percentile(all_sub_fitted_z, 95, axis=0)
surface_key = {**key, 'guessed_points': surface_guess_map, 'surface_im': calculated_surface_map,
'lower_bound_im': z_min_matrix, 'upper_bound_im': z_max_matrix}
self.insert1(surface_key)
def plot_surface3d(self, fig_height=7, fig_width=9):
""" Plot guessed surface points and fitted surface mesh in 3D
:param fig_height: Height of returned figure
:param fig_width: Width of returned figure
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
surface_guess_map, fitted_surface = self.fetch1('guessed_points', 'surface_im')
surface_height, surface_width = fitted_surface.shape
mesh_x, mesh_y = np.meshgrid(np.arange(surface_width), np.arange(surface_height))
fig = plt.figure(figsize=(fig_width, fig_height))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(mesh_x, mesh_y, fitted_surface, cmap=cm.coolwarm, linewidth=0, antialiased=False,
alpha=0.5)
ax.scatter(surface_guess_map[:, 2], surface_guess_map[:, 1], surface_guess_map[:, 0], color='grey')
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.invert_zaxis()
return fig
def plot_surface2d(self, r=50, z=None, fig_height=10, fig_width=20):
""" Plot grid of guessed points and fitted surface depths spaced 2r apart on top of stack slice at depth = z
:param r: Defines radius of square for each grid point
:param z: Pixel depth of stack to show behind depth grid
:param fig_height: Height of returned figure
:param fig_width: Width of returned figure
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from matplotlib import cm
full_stack = (PreprocessedStack & self).fetch1('resized')
stack_depth, stack_height, stack_width = full_stack.shape
surface_guess_map, fitted_surface = self.fetch1('guessed_points', 'surface_im')
fig, axes = plt.subplots(1, 2, figsize=(fig_width, fig_height))
r_xs = np.arange(r, stack_width - stack_width % r, r * 2)
r_ys = np.arange(r, stack_height - stack_height % r, r * 2)
r_mesh_x, r_mesh_y = np.meshgrid(r_xs, r_ys)
# Use the median fitted depth to pick the stack slice to show if z is not given
if z is None:
z = np.median(fitted_surface)
if z < 0 or z > stack_depth:
raise PipelineException(f'Error: Z parameter {z} is out of bounds for stack with depth {stack_depth}')
vmin = np.min((np.min(fitted_surface), np.min(surface_guess_map[:, 0])))
vmax = np.max((np.max(fitted_surface), np.max(surface_guess_map[:, 0])))
guessed_scatter = axes[0].scatter(x=surface_guess_map[:, 2], y=surface_guess_map[:, 1],
c=surface_guess_map[:, 0], cmap=cm.hot, vmin=vmin, vmax=vmax)
fitted_scatter = axes[1].scatter(x=r_mesh_x, y=r_mesh_y, c=fitted_surface[r_mesh_y, r_mesh_x], cmap=cm.hot,
vmin=vmin, vmax=vmax)
for point in surface_guess_map:
axes[0].annotate(int(point[0]), (point[2], point[1]), color='white')
for x in r_xs:
for y in r_ys:
axes[1].annotate(int(fitted_surface[y, x]), (x, y), color='white')
fig.colorbar(guessed_scatter, ax=axes[0], fraction=0.05)
axes[0].set_title(f'Guessed Depth, Z = {int(z)}')
fig.colorbar(fitted_scatter, ax=axes[1], fraction=0.05)
axes[1].set_title(f'Fitted Depth, Z = {int(z)}')
for ax in axes:
ax.imshow(full_stack[int(z), :, :])
ax.set_axis_off()
return fig
@schema
class SegmentationTask(dj.Manual):
definition = """ # defines the target, the method and the channel to use for segmentation
-> CorrectedStack
-> shared.Channel
-> shared.StackSegmMethod
---
-> experiment.Compartment
"""
def fill(self, key, channel=1, stacksegm_method=2, compartment='soma'):
for stack_key in (CorrectedStack() & key).fetch(dj.key):
tuple_ = {**stack_key, 'channel': channel,
'stacksegm_method': stacksegm_method,
'compartment': compartment}
self.insert1(tuple_, ignore_extra_fields=True, skip_duplicates=True)
@schema
class Segmentation(dj.Computed):
definition = """ # 3-d stack segmentation
-> PreprocessedStack
-> SegmentationTask
---
segmentation : external-stack # voxel-wise cell-ids (0 for background)
nobjects : int # number of cells found
"""
class ConvNet(dj.Part):
definition = """ # attributes particular to convnet based methods
-> master
---
centroids : external-stack # voxel-wise probability of centroids
probs : external-stack # voxel-wise probability of cell nuclei
seg_threshold : float # threshold used for the probability maps
min_voxels : int # minimum number of voxels (in cubic microns)
max_voxels : int # maximum number of voxels (in cubic microns)
compactness_factor : float # compactness factor used for the watershed segmentation
"""
def _make_tuples(self, key):
from .utils import segmentation3d
# Set params
seg_threshold = 0.8
min_voxels = 65 # sphere of diameter 5
max_voxels = 4186 # sphere of diameter 20
compactness_factor = 0.05 # bigger produces rounder cells
pad_mode = 'reflect' # any valid mode in np.pad
# Get stack at 1 um**3 voxels
resized = (PreprocessedStack & key).fetch1('resized')
# Segment
if key['stacksegm_method'] not in [1, 2]:
raise PipelineException('Unrecognized stack segmentation method: {}'.format(
key['stacksegm_method']))
method = 'single' if key['stacksegm_method'] == 1 else 'ensemble'
centroids, probs, segmentation = segmentation3d.segment(resized, method, pad_mode,
seg_threshold, min_voxels,
max_voxels,
compactness_factor)
# Insert
self.insert1({**key, 'nobjects': segmentation.max(),
'segmentation': segmentation})
self.ConvNet().insert1({**key, 'centroids': centroids, 'probs': probs,
'seg_threshold': seg_threshold, 'min_voxels': min_voxels,
'max_voxels': max_voxels,
'compactness_factor': compactness_factor})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
import imageio
from bl3d import utils
volume = (self & key).fetch1('segmentation')
volume = volume[:: int(volume.shape[0] / 8)] # volume at 8 diff depths
colored = utils.colorize_label(volume)
video_filename = '/tmp/' + key_hash(key) + '.gif'
imageio.mimsave(video_filename, colored, duration=1)
msg = 'segmentation for {animal_id}-{session}-{stack_idx}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg,
channel='#pipeline_quality')
@schema
class RegistrationTask(dj.Manual):
definition = """ # declare scan fields to register to a stack as well as channels and method used
-> CorrectedStack.proj(stack_session='session') # animal_id, stack_session, stack_idx, volume_id
-> shared.Channel.proj(stack_channel='channel')
-> experiment.Scan.proj(scan_session='session') # animal_id, scan_session, scan_idx
-> shared.Channel.proj(scan_channel='channel')
-> shared.Field
-> shared.RegistrationMethod
"""
def fill(self, stack_key, scan_key, stack_channel=1, scan_channel=1, method=5):
# Add stack attributes
stack_rel = CorrectedStack() & stack_key
if len(stack_rel) > 1:
raise PipelineException('More than one stack match stack_key {}'.format(
stack_key))
tuple_ = stack_rel.proj(stack_session='session').fetch1()
# Add common attributes
tuple_['stack_channel'] = stack_channel
tuple_['scan_channel'] = scan_channel
tuple_['registration_method'] = method
# Add scan attributes
fields_rel = reso.ScanInfo.Field.proj() + meso.ScanInfo.Field.proj() & scan_key
scan_animal_ids = np.unique(fields_rel.fetch('animal_id'))
if len(scan_animal_ids) > 1 or scan_animal_ids[0] != tuple_['animal_id']:
raise PipelineException('animal_id of stack and scan do not match.')
for field in fields_rel.fetch():
RegistrationTask().insert1({**tuple_, 'scan_session': field['session'],
'scan_idx': field['scan_idx'],
'field': field['field']}, skip_duplicates=True)
@schema
class Registration(dj.Computed):
""" Our affine matrix A is represented as the usual 4 x 4 matrix using homogeneous
coordinates, i.e., each point p is an [x, y, z, 1] vector.
Because each field is flat, the original z coordinate will be the same at each grid
position (zero) and thus it won't affect its final position, so our affine matrix has
only 9 parameters: a11, a21, a31, a12, a22, a32, a14, a24 and a34.
"""
definition = """ # align a 2-d scan field to a stack
-> PreprocessedStack.proj(stack_session='session', stack_channel='channel')
-> RegistrationTask
"""
@property
def key_source(self):
stacks = PreprocessedStack.proj(stack_session='session', stack_channel='channel')
return stacks * RegistrationTask & {'registration_method': 5}
class Rigid(dj.Part):
definition = """ # 3-d template matching keeping the stack straight
-> master
---
reg_x : float # (um) center of field in motor coordinate system
reg_y : float # (um) center of field in motor coordinate system
reg_z : float # (um) center of field in motor coordinate system
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Affine(dj.Part):
definition = """ # affine matrix learned via gradient ascent
-> master
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class NonRigid(dj.Part):
definition = """ # affine plus deformation field learned via gradient descent
-> master
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
landmarks : longblob # (um) x, y position of each landmark (num_landmarks x 2) assuming center of field is at (0, 0)
deformations : longblob # (um) x, y, z deformations per landmark (num_landmarks x 3)
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Params(dj.Part):
definition = """ # document some parameters used for the registration
-> master
---
rigid_zrange : int # microns above and below experimenter's estimate (in z) to search for rigid registration
lr_linear : float # learning rate for the linear part of the affine matrix
lr_translation : float # learning rate for the translation vector
affine_iters : int # number of iterations to learn the affine registration
random_seed : int # seed used to initialize landmark deformations
landmark_gap : int # number of microns between landmarks
rbf_radius : int # critical radius for the gaussian radial basis function
lr_deformations : float # learning rate for the deformation values
wd_deformations : float # regularization term to control size of the deformations
smoothness_factor : float # regularization term to control curvature of warping field
nonrigid_iters : int # number of iterations to optimize for the non-rigid parameters
"""
def make(self, key):
from .utils import registration
from .utils import enhancement
# Set params
rigid_zrange = 80 # microns to search above and below estimated z for rigid registration
lr_linear = 0.001 # learning rate / step size for the linear part of the affine matrix
lr_translation = 1 # learning rate / step size for the translation vector
affine_iters = 200 # number of optimization iterations to learn the affine parameters
random_seed = 1234 # seed for torch random number generator (used to initialize deformations)
landmark_gap = 100 # spacing for the landmarks
rbf_radius = 150 # critical radius for the gaussian rbf
lr_deformations = 0.1 # learning rate / step size for deformation values
wd_deformations = 1e-4 # weight decay for deformations; controls their size
smoothness_factor = 0.01 # factor to keep the deformation field smooth
nonrigid_iters = 200 # number of optimization iterations for the nonrigid parameters
# Get enhanced stack
stack_key = {'animal_id': key['animal_id'], 'session': key['stack_session'],
'stack_idx': key['stack_idx'], 'volume_id': key['volume_id'],
'channel': key['stack_channel']}
original_stack = (PreprocessedStack & stack_key).fetch1('resized')
stack = (PreprocessedStack & stack_key).fetch1('sharpened')
# Get field
field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
'scan_idx': key['scan_idx'], 'field': key['field'],
'channel': key['scan_channel']}
pipe = (reso if reso.ScanInfo & field_key else meso if meso.ScanInfo & field_key
else None)
original_field = (pipe.SummaryImages.Average & field_key).fetch1(
'average_image').astype(np.float32)
# Enhance field
field_dims = ((reso.ScanInfo if pipe == reso else meso.ScanInfo.Field) &
field_key).fetch1('um_height', 'um_width')
original_field = registration.resize(original_field, field_dims, desired_res=1)
field = enhancement.sharpen_2pimage(enhancement.lcn(original_field, (15, 15)), 1)
# Drop some edges to avoid artifacts
field = field[15:-15, 15:-15]
stack = stack[5:-5, 15:-15, 15:-15]
# RIGID REGISTRATION
from skimage import feature
# Get initial estimate of field depth from experimenters
field_z = (pipe.ScanInfo.Field & field_key).fetch1('z')
stack_z = (CorrectedStack & stack_key).fetch1('z')
z_limits = stack_z - stack.shape[0] / 2, stack_z + stack.shape[0] / 2
if field_z < z_limits[0] or field_z > z_limits[1]:
print('Warning: Estimated depth ({}) outside stack range ({}-{}).'.format(
field_z, *z_limits))
# Run registration with no rotations
px_z = field_z - stack_z + stack.shape[0] / 2 - 0.5
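# field_z and stack_z are in microns; the preprocessed stack is resized to 1 um/voxel, so
# their difference maps directly to a slice index, re-centered on the (symmetrically cropped)
# stack and shifted by 0.5 for the pixel-center convention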
mini_stack = stack[max(0, int(round(px_z - rigid_zrange))): int(round(
px_z + rigid_zrange))]
corrs = np.stack([feature.match_template(s, field, pad_input=True) for s in
mini_stack])
smooth_corrs = ndimage.gaussian_filter(corrs, 0.7)
# Get results
min_z = max(0, int(round(px_z - rigid_zrange)))
min_y = int(round(0.05 * stack.shape[1]))
min_x = int(round(0.05 * stack.shape[2]))
mini_corrs = smooth_corrs[:, min_y:-min_y, min_x:-min_x]
rig_z, rig_y, rig_x = np.unravel_index(np.argmax(mini_corrs), mini_corrs.shape)
# Rewrite coordinates with respect to original z
rig_z = (min_z + rig_z + 0.5) - stack.shape[0] / 2
rig_y = (min_y + rig_y + 0.5) - stack.shape[1] / 2
rig_x = (min_x + rig_x + 0.5) - stack.shape[2] / 2
del (field_z, stack_z, z_limits, px_z, mini_stack, corrs, smooth_corrs, min_z,
min_y, min_x, mini_corrs)
# AFFINE REGISTRATION
import torch
from torch import optim
import torch.nn.functional as F
def sample_grid(volume, grid):
""" Volume is a d x h x w arrray, grid is a d1 x d2 x 3 (x, y, z) coordinates
and output is a d1 x d2 array"""
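# F.grid_sample expects sampling coordinates in [-1, 1]; assuming registration.create_grid
# returns coordinates centered at 0 in pixel units, dividing by (size / 2 - 0.5) maps the
# outermost pixel centers onto +/-1.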
norm_factor = torch.as_tensor([s / 2 - 0.5 for s in volume.shape[::-1]])
norm_grid = grid / norm_factor # between -1 and 1
resampled = F.grid_sample(volume.view(1, 1, *volume.shape),
norm_grid.view(1, 1, *norm_grid.shape),
padding_mode='zeros')
return resampled.squeeze()
# Create field grid (height x width x 2)
grid = registration.create_grid(field.shape)
# Create torch tensors
stack_ = torch.as_tensor(stack, dtype=torch.float32)
field_ = torch.as_tensor(field, dtype=torch.float32)
grid_ = torch.as_tensor(grid, dtype=torch.float32)
# Define parameters and optimizer
linear = torch.nn.Parameter(torch.eye(3)[:, :2])  # first two columns of the affine's linear part
translation = torch.nn.Parameter(torch.tensor([rig_x, rig_y, rig_z])) # translation vector
affine_optimizer = optim.Adam([{'params': linear, 'lr': lr_linear},
{'params': translation, 'lr': lr_translation}])
# Optimize
for i in range(affine_iters):
# Zero gradients
affine_optimizer.zero_grad()
# Compute gradients
pred_grid = registration.affine_product(grid_, linear, translation) # w x h x 3
pred_field = sample_grid(stack_, pred_grid)
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
print('Corr at iteration {}: {:5.4f}'.format(i, -corr_loss))
corr_loss.backward()
# Update
affine_optimizer.step()
# Save em (originals will be modified during non-rigid registration)
affine_linear = linear.detach().clone()
affine_translation = translation.detach().clone()
# NON-RIGID REGISTRATION
# Inspired by the Demons algorithm (Thirion, 1998)
torch.manual_seed(random_seed) # we use random initialization below
# Create landmarks (and their corresponding deformations)
first_y = int(round((field.shape[0] % landmark_gap) / 2))
first_x = int(round((field.shape[1] % landmark_gap) / 2))
landmarks = grid_[first_x::landmark_gap, first_y::landmark_gap].contiguous().view(
-1, 2) # num_landmarks x 2
# Compute rbf scores between landmarks and grid coordinates and between landmarks
grid_distances = torch.norm(grid_.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2) # w x h x num_landmarks
landmark_distances = torch.norm(landmarks.unsqueeze(-2) - landmarks, dim=-1)
landmark_scores = torch.exp(-(landmark_distances * (1 / 200)) ** 2) # num_landmarks x num_landmarks
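# Each landmark's (x, y, z) deformation is later spread over the field with a Gaussian kernel
# of radius rbf_radius (grid_scores); landmark_scores weight the cosine-similarity regularizer
# so that only nearby landmarks are pushed to deform coherently.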
# Define parameters and optimizer
deformations = torch.nn.Parameter(torch.randn((landmarks.shape[0], 3)) / 10) # N(0, 0.1)
nonrigid_optimizer = optim.Adam([deformations], lr=lr_deformations,
weight_decay=wd_deformations)
# Optimize
for i in range(nonrigid_iters):
# Zero gradients
affine_optimizer.zero_grad() # we reuse affine_optimizer so the affine matrix changes slowly
nonrigid_optimizer.zero_grad()
# Compute grid with radial basis
affine_grid = registration.affine_product(grid_, linear, translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
pred_field = sample_grid(stack_, pred_grid)
# Compute loss
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
# Compute cosine similarity between landmarks (and weight em by distance)
norm_deformations = deformations / torch.norm(deformations, dim=-1,
keepdim=True)
cosine_similarity = torch.mm(norm_deformations, norm_deformations.t())
reg_term = -((cosine_similarity * landmark_scores).sum() /
landmark_scores.sum())
# Compute gradients
loss = corr_loss + smoothness_factor * reg_term
print('Corr/loss at iteration {}: {:5.4f}/{:5.4f}'.format(i, -corr_loss,
loss))
loss.backward()
# Update
affine_optimizer.step()
nonrigid_optimizer.step()
# Save final results
nonrigid_linear = linear.detach().clone()
nonrigid_translation = translation.detach().clone()
nonrigid_landmarks = landmarks.clone()
nonrigid_deformations = deformations.detach().clone()
# COMPUTE SCORES (USING THE ENHANCED AND CROPPED VERSION OF THE FIELD)
# Rigid
pred_grid = registration.affine_product(grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
pred_field = sample_grid(stack_, pred_grid).numpy()
rig_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Affine
pred_grid = registration.affine_product(grid_, affine_linear, affine_translation)
pred_field = sample_grid(stack_, pred_grid).numpy()
affine_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Non-rigid
affine_grid = registration.affine_product(grid_, nonrigid_linear,
nonrigid_translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, nonrigid_deformations))
pred_grid = affine_grid + warping_field
pred_field = sample_grid(stack_, pred_grid).numpy()
nonrigid_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# FIND FIELDS IN STACK
# Create grid of original size (h x w x 2)
original_grid = registration.create_grid(original_field.shape)
# Create torch tensors
original_stack_ = torch.as_tensor(original_stack, dtype=torch.float32)
original_grid_ = torch.as_tensor(original_grid, dtype=torch.float32)
# Rigid
pred_grid = registration.affine_product(original_grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
rig_field = sample_grid(original_stack_, pred_grid).numpy()
# Affine
pred_grid = registration.affine_product(original_grid_, affine_linear,
affine_translation)
affine_field = sample_grid(original_stack_, pred_grid).numpy()
# Non-rigid
affine_grid = registration.affine_product(original_grid_, nonrigid_linear,
nonrigid_translation)
original_grid_distances = torch.norm(original_grid_.unsqueeze(-2) -
nonrigid_landmarks, dim=-1)
original_grid_scores = torch.exp(-(original_grid_distances * (1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (original_grid_scores,
nonrigid_deformations))
pred_grid = affine_grid + warping_field
nonrigid_field = sample_grid(original_stack_, pred_grid).numpy()
# Insert
stack_z, stack_y, stack_x = (CorrectedStack & stack_key).fetch1('z', 'y', 'x')
self.insert1(key)
self.Params.insert1({**key, 'rigid_zrange': rigid_zrange, 'lr_linear': lr_linear,
'lr_translation': lr_translation,
'affine_iters': affine_iters, 'random_seed': random_seed,
'landmark_gap': landmark_gap, 'rbf_radius': rbf_radius,
'lr_deformations': lr_deformations,
'wd_deformations': wd_deformations,
'smoothness_factor': smoothness_factor,
'nonrigid_iters': nonrigid_iters})
self.Rigid.insert1({**key, 'reg_x': stack_x + rig_x, 'reg_y': stack_y + rig_y,
'reg_z': stack_z + rig_z, 'score': rig_score,
'reg_field': rig_field})
self.Affine.insert1({**key, 'a11': affine_linear[0, 0].item(),
'a21': affine_linear[1, 0].item(),
'a31': affine_linear[2, 0].item(),
'a12': affine_linear[0, 1].item(),
'a22': affine_linear[1, 1].item(),
'a32': affine_linear[2, 1].item(),
'reg_x': stack_x + affine_translation[0].item(),
'reg_y': stack_y + affine_translation[1].item(),
'reg_z': stack_z + affine_translation[2].item(),
'score': affine_score, 'reg_field': affine_field})
self.NonRigid.insert1({**key, 'a11': nonrigid_linear[0, 0].item(),
'a21': nonrigid_linear[1, 0].item(),
'a31': nonrigid_linear[2, 0].item(),
'a12': nonrigid_linear[0, 1].item(),
'a22': nonrigid_linear[1, 1].item(),
'a32': nonrigid_linear[2, 1].item(),
'reg_x': stack_x + nonrigid_translation[0].item(),
'reg_y': stack_y + nonrigid_translation[1].item(),
'reg_z': stack_z + nonrigid_translation[2].item(),
'landmarks': nonrigid_landmarks.numpy(),
'deformations': nonrigid_deformations.numpy(),
'score': nonrigid_score, 'reg_field': nonrigid_field})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
# No notifications
pass
def get_grid(self, type='affine', desired_res=1):
""" Get registered grid for this registration. """
import torch
from .utils import registration
# Get field
field_key = self.proj(session='scan_session')
field_dims = (reso.ScanInfo & field_key or meso.ScanInfo.Field &
field_key).fetch1('um_height', 'um_width')
# Create grid at desired resolution
grid = registration.create_grid(field_dims, desired_res=desired_res) # h x w x 2
grid = torch.as_tensor(grid, dtype=torch.float32)
# Apply required transform
if type == 'rigid':
params = (Registration.Rigid & self).fetch1('reg_x', 'reg_y', 'reg_z')
delta_x, delta_y, delta_z = params
linear = torch.eye(3)[:, :2]
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'affine':
params = (Registration.Affine & self).fetch1('a11', 'a21', 'a31', 'a12',
'a22', 'a32', 'reg_x', 'reg_y',
'reg_z')
a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'nonrigid':
params = (Registration.NonRigid & self).fetch1('a11', 'a21', 'a31', 'a12',
'a22', 'a32', 'reg_x', 'reg_y',
'reg_z', 'landmarks',
'deformations')
rbf_radius = (Registration.Params & self).fetch1('rbf_radius')
a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z, landmarks, deformations = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
landmarks = torch.from_numpy(landmarks)
deformations = torch.from_numpy(deformations)
affine_grid = registration.affine_product(grid, linear, translation)
grid_distances = torch.norm(grid.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
else:
raise PipelineException('Unrecognized registration.')
return pred_grid.numpy()
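    # Illustrative usage sketch (keys and values are assumed, not taken from this module):
    #   grid = (Registration & {'animal_id': 123, 'scan_idx': 1}).get_grid(type='affine', desired_res=1)
    #   grid.shape  # -> (height, width, 3): (x, y, z) stack coordinates in microns per field pixel
    # 'nonrigid' returns the affine grid plus the RBF warping field on top of it.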
def plot_grids(self, desired_res=5):
""" Plot the grids for this different registrations as 3-d surfaces."""
# Get grids at desired resoultion
rig_grid = self.get_grid('rigid', desired_res)
affine_grid = self.get_grid('affine', desired_res)
nonrigid_grid = self.get_grid('nonrigid', desired_res)
# Plot surfaces
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
fig = plt.figure(figsize=plt.figaspect(0.5) * 1.5)
ax = fig.gca(projection='3d')
ax.plot_surface(rig_grid[..., 0], rig_grid[..., 1], rig_grid[..., 2], alpha=0.5)
ax.plot_surface(affine_grid[..., 0], affine_grid[..., 1], affine_grid[..., 2],
alpha=0.5)
ax.plot_surface(nonrigid_grid[..., 0], nonrigid_grid[..., 1],
nonrigid_grid[..., 2], alpha=0.5)
ax.set_aspect('equal')
ax.invert_zaxis()
return fig
@schema
class FieldSegmentation(dj.Computed):
definition = """ # structural segmentation of a 2-d field (using the affine registration)
-> Segmentation.proj(stack_session='session', stacksegm_channel='channel')
-> Registration
---
    segm_field : longblob # 2-d field (height x width) of cell ids at 1 um/px
"""
class StackUnit(dj.Part):
definition = """ # single unit from the stack that appears in the field
-> master
sunit_id : int # id in the stack segmentation
---
depth : int # (um) size in z
height : int # (um) size in y
width : int # (um) size in x
        volume : float # (um^3) volume of the 3-d unit
        area : float # (um^2) area of the 2-d mask
sunit_z : float # (um) centroid for the 3d unit in the motor coordinate system
sunit_y : float # (um) centroid for the 3d unit in the motor coordinate system
sunit_x : float # (um) centroid for the 3d unit in the motor coordinate system
mask_z : float # (um) centroid for the 2d mask in the motor coordinate system
mask_y : float # (um) centroid for the 2d mask in the motor coordinate system
mask_x : float # (um) centroid for the 2d mask in the motor coordinate system
distance : float # (um) euclidean distance between centroid of 2-d mask and 3-d unit
"""
def _make_tuples(self, key):
from skimage import measure
# Get structural segmentation
stack_key = {'animal_id': key['animal_id'], 'session': key['stack_session'],
'stack_idx': key['stack_idx'], 'volume_id': key['volume_id'],
'channel': key['stacksegm_channel']}
instance = (Segmentation & stack_key).fetch1('segmentation')
# Get segmented field
grid = (Registration & key).get_grid(type='affine', desired_res=1)
stack_center = np.array((CorrectedStack & stack_key).fetch1('z', 'y', 'x'))
px_grid = (grid[..., ::-1] - stack_center - 0.5 + np.array(instance.shape) / 2)
segmented_field = ndimage.map_coordinates(instance, np.moveaxis(px_grid, -1, 0),
order=0) # nearest neighbor sampling
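        # Worked example of the coordinate change above (illustrative numbers, not from the data):
        # if the stack center sits at motor (z, y, x) = (100, 0, 0) and the stack array has shape
        # (200, 512, 512), a grid point at motor z = 100 maps to pixel
        # (100 - 100) - 0.5 + 200 / 2 = 99.5, i.e. the center of the middle slice
        # (pixel centers sit at half-integer offsets, hence the -0.5 term).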
# Insert in FieldSegmentation
self.insert1({**key, 'segm_field': segmented_field})
# Insert each StackUnit
instance_props = measure.regionprops(instance)
instance_labels = np.array([p.label for p in instance_props])
for prop in measure.regionprops(segmented_field):
sunit_id = prop.label
instance_prop = instance_props[np.argmax(instance_labels == sunit_id)]
depth = (instance_prop.bbox[3] - instance_prop.bbox[0])
height = (instance_prop.bbox[4] - instance_prop.bbox[1])
width = (instance_prop.bbox[5] - instance_prop.bbox[2])
volume = instance_prop.area
sunit_z, sunit_y, sunit_x = (stack_center + np.array(instance_prop.centroid) -
np.array(instance.shape) / 2 + 0.5)
binary_sunit = segmented_field == sunit_id
area = np.count_nonzero(binary_sunit)
px_y, px_x = ndimage.measurements.center_of_mass(binary_sunit)
px_coords = np.array([[px_y], [px_x]])
mask_x, mask_y, mask_z = [ndimage.map_coordinates(grid[..., i], px_coords,
order=1)[0] for i in
range(3)]
distance = np.sqrt((sunit_z - mask_z) ** 2 + (sunit_y - mask_y) ** 2 +
(sunit_x - mask_x) ** 2)
# Insert in StackUnit
self.StackUnit.insert1({**key, 'sunit_id': sunit_id, 'depth': depth,
'height': height, 'width': width, 'volume': volume,
'area': area, 'sunit_z': sunit_z, 'sunit_y': sunit_y,
'sunit_x': sunit_x, 'mask_z': mask_z,
'mask_y': mask_y, 'mask_x': mask_x,
'distance': distance})
@schema
class RegistrationOverTime(dj.Computed):
definition = """ # register a field at different timepoints of recording
-> PreprocessedStack.proj(stack_session='session', stack_channel='channel')
-> RegistrationTask
"""
@property
def key_source(self):
stacks = PreprocessedStack.proj(stack_session='session', stack_channel='channel')
return stacks * RegistrationTask & {'registration_method': 5}
class Chunk(dj.Part):
definition = """ # single registered chunk
-> master
frame_num : int # frame number of the frame in the middle of this chunk
---
initial_frame : int # initial frame used in this chunk (1-based)
final_frame : int # final frame used in this chunk (1-based)
avg_chunk : longblob # average field used for registration
"""
def get_grid(self, type='nonrigid', desired_res=1):
# TODO: Taken verbatim from Registration (minor changes for formatting), refactor
""" Get registered grid for this registration. """
import torch
from .utils import registration
# Get field
field_key = self.proj(session='scan_session')
field_dims = (reso.ScanInfo & field_key or meso.ScanInfo.Field &
field_key).fetch1('um_height', 'um_width')
# Create grid at desired resolution
grid = registration.create_grid(field_dims, desired_res=desired_res) # h x w x 2
grid = torch.as_tensor(grid, dtype=torch.float32)
# Apply required transform
if type == 'rigid':
params = (RegistrationOverTime.Rigid & self).fetch1('reg_x', 'reg_y',
'reg_z')
delta_x, delta_y, delta_z = params
linear = torch.eye(3)[:, :2]
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'affine':
params = (RegistrationOverTime.Affine & self).fetch1('a11', 'a21', 'a31',
'a12', 'a22', 'a32',
'reg_x', 'reg_y',
'reg_z')
a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
pred_grid = registration.affine_product(grid, linear, translation)
elif type == 'nonrigid':
params = (RegistrationOverTime.NonRigid & self).fetch1('a11', 'a21',
'a31', 'a12',
'a22', 'a32',
'reg_x', 'reg_y',
'reg_z',
'landmarks',
'deformations')
rbf_radius = (RegistrationOverTime.Params & self).fetch1('rbf_radius')
(a11, a21, a31, a12, a22, a32, delta_x, delta_y, delta_z, landmarks,
deformations) = params
linear = torch.tensor([[a11, a12], [a21, a22], [a31, a32]])
translation = torch.tensor([delta_x, delta_y, delta_z])
landmarks = torch.from_numpy(landmarks)
deformations = torch.from_numpy(deformations)
affine_grid = registration.affine_product(grid, linear, translation)
grid_distances = torch.norm(grid.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
else:
raise PipelineException('Unrecognized registration.')
return pred_grid.numpy()
class Rigid(dj.Part):
definition = """ # rigid registration of a single chunk
-> RegistrationOverTime.Chunk
---
reg_x : float # (um) center of field in motor coordinate system
reg_y : float # (um) center of field in motor coordinate system
reg_z : float # (um) center of field in motor coordinate system
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Affine(dj.Part):
definition = """ # affine matrix learned via gradient ascent
-> RegistrationOverTime.Chunk
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class NonRigid(dj.Part):
definition = """ # affine plus deformation field learned via gradient descent
-> RegistrationOverTime.Chunk
---
a11 : float # (um) element in row 1, column 1 of the affine matrix
a21 : float # (um) element in row 2, column 1 of the affine matrix
a31 : float # (um) element in row 3, column 1 of the affine matrix
a12 : float # (um) element in row 1, column 2 of the affine matrix
a22 : float # (um) element in row 2, column 2 of the affine matrix
a32 : float # (um) element in row 3, column 2 of the affine matrix
reg_x : float # (um) element in row 1, column 4 of the affine matrix
reg_y : float # (um) element in row 2, column 4 of the affine matrix
reg_z : float # (um) element in row 3, column 4 of the affine matrix
landmarks : longblob # (um) x, y position of each landmark (num_landmarks x 2) assuming center of field is at (0, 0)
deformations : longblob # (um) x, y, z deformations per landmark (num_landmarks x 3)
score : float # cross-correlation score (-1 to 1)
reg_field : longblob # extracted field from the stack in the specified position
"""
class Params(dj.Part):
definition = """ # document some parameters used for the registration
-> master
---
rigid_zrange : int # microns above and below experimenter's estimate (in z) to search for rigid registration
lr_linear : float # learning rate for the linear part of the affine matrix
lr_translation : float # learning rate for the translation vector
affine_iters : int # number of iterations to learn the affine registration
random_seed : int # seed used to initialize landmark deformations
landmark_gap : int # number of microns between landmarks
rbf_radius : int # critical radius for the gaussian radial basis function
lr_deformations : float # learning rate for the deformation values
wd_deformations : float # regularization term to control size of the deformations
smoothness_factor : float # regularization term to control curvature of warping field
nonrigid_iters : int # number of iterations to optimize for the non-rigid parameters
"""
def make(self, key):
from .utils import registration
from .utils import enhancement
# Set params
rigid_zrange = 80 # microns to search above and below estimated z for rigid registration
lr_linear = 0.001 # learning rate / step size for the linear part of the affine matrix
lr_translation = 1 # learning rate / step size for the translation vector
affine_iters = 200 # number of optimization iterations to learn the affine parameters
random_seed = 1234 # seed for torch random number generator (used to initialize deformations)
landmark_gap = 100 # spacing for the landmarks
rbf_radius = 150 # critical radius for the gaussian rbf
lr_deformations = 0.1 # learning rate / step size for deformation values
wd_deformations = 1e-4 # weight decay for deformations; controls their size
smoothness_factor = 0.01 # factor to keep the deformation field smooth
nonrigid_iters = 200 # number of optimization iterations for the nonrigid parameters
# Get enhanced stack
stack_key = {'animal_id': key['animal_id'], 'session': key['stack_session'],
'stack_idx': key['stack_idx'], 'volume_id': key['volume_id'],
'channel': key['stack_channel']}
original_stack = (PreprocessedStack & stack_key).fetch1('resized')
stack = (PreprocessedStack & stack_key).fetch1('sharpened')
stack = stack[5:-5, 15:-15, 15:-15] # drop some edges
# Get corrected scan
field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
'scan_idx': key['scan_idx'], 'field': key['field'],
'channel': key['scan_channel']}
pipe = (reso if reso.ScanInfo & field_key else meso if meso.ScanInfo & field_key
else None)
scan = RegistrationOverTime._get_corrected_scan(field_key)
# Get initial estimate of field depth from experimenters
field_z = (pipe.ScanInfo.Field & field_key).fetch1('z')
stack_z = (CorrectedStack & stack_key).fetch1('z')
z_limits = stack_z - stack.shape[0] / 2, stack_z + stack.shape[0] / 2
if field_z < z_limits[0] or field_z > z_limits[1]:
print('Warning: Estimated depth ({}) outside stack range ({}-{}).'.format(
field_z, *z_limits))
# Compute best chunk size: each lasts the same (~15 minutes)
fps = (pipe.ScanInfo & field_key).fetch1('fps')
num_frames = scan.shape[-1]
overlap = int(round(3 * 60 * fps)) # ~ 3 minutes
num_chunks = int(np.ceil((num_frames - overlap) / (15 * 60 * fps - overlap)))
chunk_size = int(np.floor((num_frames - overlap) / num_chunks + overlap)) # *
# * distributes frames in the last (incomplete) chunk to the other chunks
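        # Worked example with assumed numbers (not from the data): at fps = 5 and
        # num_frames = 18000 (60 minutes), overlap = round(3 * 60 * 5) = 900 frames and
        # num_chunks = ceil((18000 - 900) / (15 * 60 * 5 - 900)) = ceil(17100 / 3600) = 5,
        # so chunk_size = floor(17100 / 5 + 900) = 4320 frames (~14.4 min per chunk).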
# Insert in RegistrationOverTime and Params (once per field)
self.insert1(key)
self.Params.insert1(
{**key, 'rigid_zrange': rigid_zrange, 'lr_linear': lr_linear,
'lr_translation': lr_translation, 'affine_iters': affine_iters,
'random_seed': random_seed, 'landmark_gap': landmark_gap,
'rbf_radius': rbf_radius, 'lr_deformations': lr_deformations,
'wd_deformations': wd_deformations, 'smoothness_factor': smoothness_factor,
'nonrigid_iters': nonrigid_iters})
# Iterate over chunks
for initial_frame in range(0, num_frames - chunk_size, chunk_size - overlap):
# Get next chunk
final_frame = initial_frame + chunk_size
chunk = scan[..., initial_frame: final_frame]
# Enhance field
field_dims = ((reso.ScanInfo if pipe == reso else meso.ScanInfo.Field) &
field_key).fetch1('um_height', 'um_width')
original_field = registration.resize(chunk.mean(-1), field_dims,
desired_res=1)
field = enhancement.sharpen_2pimage(enhancement.lcn(original_field, 15), 1)
field = field[15:-15, 15:-15] # drop some edges
# TODO: From here until Insert is taken verbatim from Registration, refactor
# RIGID REGISTRATION
from skimage import feature
# Run registration with no rotations
px_z = field_z - stack_z + stack.shape[0] / 2 - 0.5
mini_stack = stack[max(0, int(round(px_z - rigid_zrange))): int(round(
px_z + rigid_zrange))]
corrs = np.stack([feature.match_template(s, field, pad_input=True) for s in
mini_stack])
smooth_corrs = ndimage.gaussian_filter(corrs, 0.7)
# Get results
min_z = max(0, int(round(px_z - rigid_zrange)))
min_y = int(round(0.05 * stack.shape[1]))
min_x = int(round(0.05 * stack.shape[2]))
mini_corrs = smooth_corrs[:, min_y:-min_y, min_x:-min_x]
rig_z, rig_y, rig_x = np.unravel_index(np.argmax(mini_corrs),
mini_corrs.shape)
# Rewrite coordinates with respect to original z
rig_z = (min_z + rig_z + 0.5) - stack.shape[0] / 2
rig_y = (min_y + rig_y + 0.5) - stack.shape[1] / 2
rig_x = (min_x + rig_x + 0.5) - stack.shape[2] / 2
del px_z, mini_stack, corrs, smooth_corrs, min_z, min_y, min_x, mini_corrs
# AFFINE REGISTRATION
import torch
from torch import optim
import torch.nn.functional as F
def sample_grid(volume, grid):
""" Volume is a d x h x w arrray, grid is a d1 x d2 x 3 (x, y, z)
coordinates and output is a d1 x d2 array"""
norm_factor = torch.as_tensor([s / 2 - 0.5 for s in volume.shape[::-1]])
norm_grid = grid / norm_factor # between -1 and 1
resampled = F.grid_sample(volume.view(1, 1, *volume.shape),
norm_grid.view(1, 1, *norm_grid.shape),
padding_mode='zeros')
return resampled.squeeze()
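            # Note on the normalization above (my reading of F.grid_sample, stated as an assumption):
            # grid_sample expects coordinates in [-1, 1], with +/-1 at the centers of the last/first
            # voxels. E.g. for a volume of width 512, the x norm factor is 512 / 2 - 0.5 = 255.5,
            # so an un-normalized coordinate of 255.5 maps to +1 and -255.5 maps to -1.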
# Create field grid (height x width x 2)
grid = registration.create_grid(field.shape)
# Create torch tensors
stack_ = torch.as_tensor(stack, dtype=torch.float32)
field_ = torch.as_tensor(field, dtype=torch.float32)
grid_ = torch.as_tensor(grid, dtype=torch.float32)
# Define parameters and optimizer
linear = torch.nn.Parameter(torch.eye(3)[:, :2]) # first two columns of rotation matrix
translation = torch.nn.Parameter(torch.tensor([rig_x, rig_y, rig_z])) # translation vector
affine_optimizer = optim.Adam([{'params': linear, 'lr': lr_linear},
{'params': translation, 'lr': lr_translation}])
# Optimize
for i in range(affine_iters):
# Zero gradients
affine_optimizer.zero_grad()
# Compute gradients
pred_grid = registration.affine_product(grid_, linear, translation) # w x h x 3
pred_field = sample_grid(stack_, pred_grid)
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
print('Corr at iteration {}: {:5.4f}'.format(i, -corr_loss))
corr_loss.backward()
# Update
affine_optimizer.step()
# Save them (originals will be modified during non-rigid registration)
affine_linear = linear.detach().clone()
affine_translation = translation.detach().clone()
# NON-RIGID REGISTRATION
            # Inspired by the demons algorithm (Thirion, 1998)
torch.manual_seed(random_seed) # we use random initialization below
# Create landmarks (and their corresponding deformations)
first_y = int(round((field.shape[0] % landmark_gap) / 2))
first_x = int(round((field.shape[1] % landmark_gap) / 2))
landmarks = grid_[first_x::landmark_gap,
first_y::landmark_gap].contiguous().view(-1, 2) # num_landmarks x 2
# Compute rbf scores between landmarks and grid coordinates and between landmarks
grid_distances = torch.norm(grid_.unsqueeze(-2) - landmarks, dim=-1)
grid_scores = torch.exp(-(grid_distances * (1 / rbf_radius)) ** 2) # w x h x num_landmarks
landmark_distances = torch.norm(landmarks.unsqueeze(-2) - landmarks, dim=-1)
landmark_scores = torch.exp(-(landmark_distances * (1 / 200)) ** 2) # num_landmarks x num_landmarks
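            # Shape sketch (illustrative, assuming a ~400 x 400 um field and landmark_gap = 100):
            # landmarks is num_landmarks x 2 (here about 4 x 4 = 16 landmarks), grid_scores holds one
            # RBF weight per grid pixel per landmark, and landmark_scores is
            # num_landmarks x num_landmarks. The hard-coded 200 um radius only sets how strongly the
            # smoothness regularizer couples nearby landmarks; it is independent of rbf_radius.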
# Define parameters and optimizer
deformations = torch.nn.Parameter(torch.randn((landmarks.shape[0], 3)) / 10) # N(0, 0.1)
nonrigid_optimizer = optim.Adam([deformations], lr=lr_deformations,
weight_decay=wd_deformations)
# Optimize
for i in range(nonrigid_iters):
# Zero gradients
affine_optimizer.zero_grad() # we reuse affine_optimizer so the affine matrix changes slowly
nonrigid_optimizer.zero_grad()
# Compute grid with radial basis
affine_grid = registration.affine_product(grid_, linear, translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, deformations))
pred_grid = affine_grid + warping_field
pred_field = sample_grid(stack_, pred_grid)
# Compute loss
corr_loss = -(pred_field * field_).sum() / (torch.norm(pred_field) *
torch.norm(field_))
# Compute cosine similarity between landmarks (and weight em by distance)
norm_deformations = deformations / torch.norm(deformations, dim=-1,
keepdim=True)
cosine_similarity = torch.mm(norm_deformations, norm_deformations.t())
reg_term = -((cosine_similarity * landmark_scores).sum() /
landmark_scores.sum())
# Compute gradients
loss = corr_loss + smoothness_factor * reg_term
print('Corr/loss at iteration {}: {:5.4f}/{:5.4f}'.format(i, -corr_loss,
loss))
loss.backward()
# Update
affine_optimizer.step()
nonrigid_optimizer.step()
# Save final results
nonrigid_linear = linear.detach().clone()
nonrigid_translation = translation.detach().clone()
nonrigid_landmarks = landmarks.clone()
nonrigid_deformations = deformations.detach().clone()
# COMPUTE SCORES (USING THE ENHANCED AND CROPPED VERSION OF THE FIELD)
# Rigid
pred_grid = registration.affine_product(grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
pred_field = sample_grid(stack_, pred_grid).numpy()
rig_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Affine
pred_grid = registration.affine_product(grid_, affine_linear,
affine_translation)
pred_field = sample_grid(stack_, pred_grid).numpy()
affine_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# Non-rigid
affine_grid = registration.affine_product(grid_, nonrigid_linear,
nonrigid_translation)
warping_field = torch.einsum('whl,lt->wht', (grid_scores, nonrigid_deformations))
pred_grid = affine_grid + warping_field
pred_field = sample_grid(stack_, pred_grid).numpy()
nonrigid_score = np.corrcoef(field.ravel(), pred_field.ravel())[0, 1]
# FIND FIELDS IN STACK
# Create grid of original size (h x w x 2)
original_grid = registration.create_grid(original_field.shape)
# Create torch tensors
original_stack_ = torch.as_tensor(original_stack, dtype=torch.float32)
original_grid_ = torch.as_tensor(original_grid, dtype=torch.float32)
# Rigid
pred_grid = registration.affine_product(original_grid_, torch.eye(3)[:, :2],
torch.tensor([rig_x, rig_y, rig_z]))
rig_field = sample_grid(original_stack_, pred_grid).numpy()
# Affine
pred_grid = registration.affine_product(original_grid_, affine_linear,
affine_translation)
affine_field = sample_grid(original_stack_, pred_grid).numpy()
# Non-rigid
affine_grid = registration.affine_product(original_grid_, nonrigid_linear,
nonrigid_translation)
original_grid_distances = torch.norm(original_grid_.unsqueeze(-2) -
nonrigid_landmarks, dim=-1)
original_grid_scores = torch.exp(-(original_grid_distances *
(1 / rbf_radius)) ** 2)
warping_field = torch.einsum('whl,lt->wht', (original_grid_scores,
nonrigid_deformations))
pred_grid = affine_grid + warping_field
nonrigid_field = sample_grid(original_stack_, pred_grid).numpy()
# Insert chunk
stack_z, stack_y, stack_x = (CorrectedStack & stack_key).fetch1('z', 'y', 'x')
frame_num = int(round((initial_frame + final_frame) / 2))
self.Chunk.insert1({**key, 'frame_num': frame_num + 1,
'initial_frame': initial_frame + 1,
'final_frame': final_frame, 'avg_chunk': original_field})
self.Rigid.insert1({**key, 'frame_num': frame_num + 1,
'reg_x': stack_x + rig_x, 'reg_y': stack_y + rig_y,
'reg_z': stack_z + rig_z, 'score': rig_score,
'reg_field': rig_field})
self.Affine.insert1({**key, 'frame_num': frame_num + 1,
'a11': affine_linear[0, 0].item(),
'a21': affine_linear[1, 0].item(),
'a31': affine_linear[2, 0].item(),
'a12': affine_linear[0, 1].item(),
'a22': affine_linear[1, 1].item(),
'a32': affine_linear[2, 1].item(),
'reg_x': stack_x + affine_translation[0].item(),
'reg_y': stack_y + affine_translation[1].item(),
'reg_z': stack_z + affine_translation[2].item(),
'score': affine_score,
'reg_field': affine_field})
self.NonRigid.insert1({**key, 'frame_num': frame_num + 1,
'a11': nonrigid_linear[0, 0].item(),
'a21': nonrigid_linear[1, 0].item(),
'a31': nonrigid_linear[2, 0].item(),
'a12': nonrigid_linear[0, 1].item(),
'a22': nonrigid_linear[1, 1].item(),
'a32': nonrigid_linear[2, 1].item(),
'reg_x': stack_x + nonrigid_translation[0].item(),
'reg_y': stack_y + nonrigid_translation[1].item(),
'reg_z': stack_z + nonrigid_translation[2].item(),
'landmarks': nonrigid_landmarks.numpy(),
'deformations': nonrigid_deformations.numpy(),
'score': nonrigid_score, 'reg_field': nonrigid_field})
# self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
frame_num, zs, scores = (self.Affine & key).fetch('frame_num', 'reg_z', 'score')
plt.plot(frame_num, -zs, zorder=1)
plt.scatter(frame_num, -zs, marker='*', s=scores * 70, zorder=2, color='r')
plt.title('Registration over time (star size represents confidence)')
plt.ylabel('z (surface at 0)')
plt.xlabel('Frames')
img_filename = '/tmp/{}.png'.format(key_hash(key))
plt.savefig(img_filename)
plt.close()
msg = ('registration over time of {animal_id}-{scan_session}-{scan_idx} field '
'{field} to {animal_id}-{stack_session}-{stack_idx}')
msg = msg.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key &
{'session': key['stack_session']})
slack_user.notify(file=img_filename, file_title=msg)
def _get_corrected_scan(key):
# Read scan
scan_filename = (experiment.Scan & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Get some params
pipe = reso if (reso.ScanInfo() & key) else meso
# Map: Correct scan in parallel
f = performance.parallel_correct_scan # function to map
raster_phase = (pipe.RasterCorrection & key).fetch1('raster_phase')
fill_fraction = (pipe.ScanInfo & key).fetch1('fill_fraction')
y_shifts, x_shifts = (pipe.MotionCorrection & key).fetch1('y_shifts', 'x_shifts')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'y_shifts': y_shifts, 'x_shifts': x_shifts}
results = performance.map_frames(f, scan, field_id=key['field'] - 1,
channel=key['channel'] - 1, kwargs=kwargs)
# Reduce: Make a single array (height x width x num_frames)
height, width, _ = results[0][1].shape
corrected_scan = np.zeros([height, width, scan.num_frames], dtype=np.float32)
for frames, chunk in results:
corrected_scan[..., frames] = chunk
return corrected_scan
def session_plot(self):
""" Create a registration plot for the session"""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Check that plot is restricted to a single stack and a single session
regot_key = self.fetch('KEY', limit=1)[0]
stack_key = {n: regot_key[n] for n in ['animal_id', 'stack_session', 'stack_idx',
'volume_id']}
session_key = {n: regot_key[n] for n in ['animal_id', 'scan_session']}
if len(self & stack_key) != len(self):
raise PipelineException('Plot can only be generated for one stack at a time')
if len(self & session_key) != len(self):
raise PipelineException('Plot can only be generated for one session at a '
'time')
# Get field times and depths
ts = []
zs = []
session_ts = (experiment.Session & regot_key &
{'session': regot_key['scan_session']}).fetch1('session_ts')
for key in self.fetch('KEY'):
field_key = {'animal_id': key['animal_id'], 'session': key['scan_session'],
'scan_idx': key['scan_idx'], 'field': key['field']}
scan_ts = (experiment.Scan & field_key).fetch1('scan_ts')
fps = (reso.ScanInfo & field_key or meso.ScanInfo & field_key).fetch1('fps')
frame_nums, field_zs = (RegistrationOverTime.Affine & key).fetch('frame_num',
'reg_z')
field_ts = (scan_ts - session_ts).seconds + frame_nums / fps # in seconds
ts.append(field_ts)
zs.append(field_zs)
# Plot
fig = plt.figure(figsize=(20, 8))
for ts_, zs_ in zip(ts, zs):
plt.plot(ts_ / 3600, zs_)
plt.title('Registered zs for {animal_id}-{scan_session} into {animal_id}-'
'{stack_session}-{stack_idx} starting at {t}'.format(t=session_ts,
**regot_key))
plt.ylabel('Registered zs')
plt.xlabel('Hours')
# Plot formatting
plt.gca().invert_yaxis()
plt.gca().yaxis.set_major_locator(ticker.MultipleLocator(10))
plt.grid(linestyle='--', alpha=0.8)
return fig
@schema
class Drift(dj.Computed):
definition = """ # assuming a linear drift, compute the rate of drift (of the affine registration)
-> RegistrationOverTime
---
z_slope : float # (um/hour) drift of the center of the field
y_slope : float # (um/hour) drift of the center of the field
x_slope : float # (um/hour) drift of the center of the field
z_rmse : float # (um) root mean squared error of the fit
y_rmse : float # (um) root mean squared error of the fit
x_rmse : float # (um) root mean squared error of the fit
"""
@property
def key_source(self):
return RegistrationOverTime.aggr(RegistrationOverTime.Chunk.proj(),
nchunks='COUNT(*)') & 'nchunks > 1'
def _make_tuples(self, key):
from sklearn import linear_model
# Get drifts per axis
frame_nums, zs, ys, xs = (RegistrationOverTime.Affine & key).fetch('frame_num',
'reg_z', 'reg_y', 'reg_x')
# Get scan fps
field_key = {**key, 'session': key['scan_session']}
fps = (reso.ScanInfo() & field_key or meso.ScanInfo() & field_key).fetch1('fps')
# Fit a line through the values (robust regression)
slopes = []
rmses = []
X = frame_nums.reshape(-1, 1)
for y in [zs, ys, xs]:
model = linear_model.TheilSenRegressor()
model.fit(X, y)
slopes.append(model.coef_[0] * fps * 3600)
            rmses.append(np.sqrt(np.mean((y - model.predict(X)) ** 2)))  # RMSE of this axis
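            # Unit sketch for the slope conversion above (illustrative numbers): the fit is over
            # frame numbers, so model.coef_[0] is um per frame; multiplying by fps (frames/s) and
            # 3600 (s/hour) gives um/hour, e.g. 0.0005 um/frame * 5 fps * 3600 = 9 um/hour.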
self.insert1({**key, 'z_slope': slopes[0], 'y_slope': slopes[1],
'x_slope': slopes[2], 'z_rmse': rmses[0], 'y_rmse': rmses[1],
'x_rmse': rmses[2]})
def session_plot(self):
""" Create boxplots for the session (one per scan)."""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Check that plot is restricted to a single stack and a single session
regot_key = self.fetch('KEY', limit=1)[0]
stack_key = {n: regot_key[n] for n in ['animal_id', 'stack_session', 'stack_idx',
'volume_id']}
session_key = {n: regot_key[n] for n in ['animal_id', 'scan_session']}
if len(self & stack_key) != len(self):
raise PipelineException('Plot can only be generated for one stack at a time')
if len(self & session_key) != len(self):
raise PipelineException('Plot can only be generated for one session at a '
'time')
# Get field times and depths
z_slopes = []
scan_idxs = np.unique(self.fetch('scan_idx'))
for scan_idx in scan_idxs:
scan_slopes = (self & {**session_key, 'scan_idx': scan_idx}).fetch('z_slope')
z_slopes.append(scan_slopes)
# Plot
fig = plt.figure(figsize=(7, 4))
plt.boxplot(z_slopes)
plt.title('Z drift for {animal_id}-{scan_session} into {animal_id}-'
'{stack_session}-{stack_idx}'.format(**regot_key))
plt.ylabel('Z drift (um/hour)')
plt.xlabel('Scans')
plt.xticks(range(1, len(scan_idxs) + 1), scan_idxs)
# Plot formatting
plt.gca().invert_yaxis()
plt.gca().yaxis.set_major_locator(ticker.MultipleLocator(5))
plt.grid(linestyle='--', alpha=0.8)
return fig
@schema
class StackSet(dj.Computed):
definition = """ # match segmented masks by proximity in the stack
-> CorrectedStack.proj(stack_session='session') # animal_id, stack_session, stack_idx, volume_id
-> shared.RegistrationMethod
-> shared.SegmentationMethod
---
    min_distance :tinyint # (um) distance used as threshold to accept two masks as the same
    max_height :tinyint # (um) maximum allowed height of a joint mask
"""
@property
def key_source(self):
return (CorrectedStack.proj(stack_session='session') *
shared.RegistrationMethod.proj() * shared.SegmentationMethod.proj() &
Registration & {'segmentation_method': 6})
class Unit(dj.Part):
definition = """ # a unit in the stack
-> master
munit_id :int # unique id in the stack
---
munit_x :float # (um) position of centroid in motor coordinate system
munit_y :float # (um) position of centroid in motor coordinate system
munit_z :float # (um) position of centroid in motor coordinate system
"""
class Match(dj.Part):
definition = """ # Scan unit to stack unit match (n:1 relation)
-> master
-> experiment.Scan.proj(scan_session='session') # animal_id, scan_session, scan_idx
unit_id :int # unit id from ScanSet.Unit
---
-> StackSet.Unit
"""
class MatchedUnit():
""" Coordinates for a set of masks that form a single cell."""
def __init__(self, key, x, y, z, plane_id):
self.keys = [key]
self.xs = [x]
self.ys = [y]
self.zs = [z]
self.plane_ids = [plane_id]
self.centroid = [x, y, z]
def join_with(self, other):
self.keys += other.keys
self.xs += other.xs
self.ys += other.ys
self.zs += other.zs
self.plane_ids += other.plane_ids
self.centroid = [np.mean(self.xs), np.mean(self.ys), np.mean(self.zs)]
def __lt__(self, other):
""" Used for sorting. """
return True
def make(self, key):
from scipy.spatial import distance
import bisect
# Set some params
min_distance = 10
max_height = 20
# Create list of units
units = [] # stands for matched units
for field in Registration & key:
# Edge case: when two channels are registered, we don't know which to use
if len(Registration.proj(ignore='scan_channel') & field) > 1:
msg = ('More than one channel was registered for {animal_id}-'
'{scan_session}-{scan_idx} field {field}'.format(**field))
raise PipelineException(msg)
# Get registered grid
field_key = {'animal_id': field['animal_id'],
'session': field['scan_session'], 'scan_idx': field['scan_idx'],
'field': field['field']}
pipe = reso if reso.ScanInfo & field_key else meso
um_per_px = ((reso.ScanInfo if pipe == reso else meso.ScanInfo.Field) &
field_key).microns_per_pixel
grid = (Registration & field).get_grid(type='affine', desired_res=um_per_px)
# Create cell objects
for channel_key in (pipe.ScanSet & field_key &
{'segmentation_method': key['segmentation_method']}): # *
somas = pipe.MaskClassification.Type & {'type': 'soma'}
field_somas = pipe.ScanSet.Unit & channel_key & somas
unit_keys, xs, ys = (pipe.ScanSet.UnitInfo & field_somas).fetch('KEY',
'px_x', 'px_y')
px_coords = np.stack([ys, xs])
xs, ys, zs = [ndimage.map_coordinates(grid[..., i], px_coords, order=1)
for i in range(3)]
units += [StackSet.MatchedUnit(*args, key_hash(channel_key)) for args in
zip(unit_keys, xs, ys, zs)]
# * Separating masks per channel allows masks in diff channels to be matched
print(len(units), 'initial units')
def find_close_units(centroid, centroids, min_distance):
""" Finds centroids that are closer than min_distance to centroid. """
dists = distance.cdist(np.expand_dims(centroid, 0), centroids)
indices = np.flatnonzero(dists < min_distance)
return indices, dists[0, indices]
def is_valid(unit1, unit2, max_height):
""" Checks that units belong to different fields and that the resulting unit
would not be bigger than 20 microns."""
different_fields = len(set(unit1.plane_ids) & set(unit2.plane_ids)) == 0
acceptable_height = (max(unit1.zs + unit2.zs) - min(
unit1.zs + unit2.zs)) < max_height
return different_fields and acceptable_height
# Create distance matrix
# For memory efficiency we use an adjacency list with only the units at less than 10 microns
centroids = np.stack([u.centroid for u in units])
distance_list = [] # list of triples (distance, unit1, unit2)
for i in range(len(units)):
indices, distances = find_close_units(centroids[i], centroids[i + 1:],
min_distance)
for dist, j in zip(distances, i + 1 + indices):
if is_valid(units[i], units[j], max_height):
bisect.insort(distance_list, (dist, units[i], units[j]))
print(len(distance_list), 'possible pairings')
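        # Sketch of the greedy agglomeration below (my reading of the loop, illustrative):
        # distance_list is kept sorted by distance, so the closest valid pair is always merged
        # first; after a merge, pairs involving either original unit are dropped and distances
        # from the new joint unit to all remaining units are re-inserted, until no pair closer
        # than min_distance remains.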
# Join units
while (len(distance_list) > 0):
# Get next pair of units
d, unit1, unit2 = distance_list.pop(0)
# Remove them from lists
units.remove(unit1)
units.remove(unit2)
f = lambda x: (unit1 not in x[1:]) and (unit2 not in x[1:])
distance_list = list(filter(f, distance_list))
# Join them
unit1.join_with(unit2)
# Recalculate distances
centroids = [u.centroid for u in units]
indices, distances = find_close_units(unit1.centroid, centroids, min_distance)
for dist, j in zip(distances, indices):
if is_valid(unit1, units[j], max_height):
                    bisect.insort(distance_list, (dist, unit1, units[j]))  # use the recomputed distance
# Insert new unit
units.append(unit1)
print(len(units), 'number of final masks')
# Insert
self.insert1({**key, 'min_distance': min_distance, 'max_height': max_height})
for munit_id, munit in zip(itertools.count(start=1), units):
new_unit = {**key, 'munit_id': munit_id, 'munit_x': munit.centroid[0],
'munit_y': munit.centroid[1], 'munit_z': munit.centroid[2]}
self.Unit().insert1(new_unit)
for subunit_key in munit.keys:
new_match = {**key, 'munit_id': munit_id, **subunit_key,
'scan_session': subunit_key['session']}
self.Match().insert1(new_match, ignore_extra_fields=True)
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
fig = (StackSet() & key).plot_centroids3d()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename)
plt.close(fig)
msg = ('StackSet for {animal_id}-{stack_session}-{stack_idx}: {num_units} final '
'units').format(**key, num_units=len(self.Unit & key))
slack_user = notify.SlackUser & (experiment.Session & key &
{'session': key['stack_session']})
slack_user.notify(file=img_filename, file_title=msg)
def plot_centroids3d(self):
""" Plots the centroids of all units in the motor coordinate system (in microns)
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from mpl_toolkits.mplot3d import Axes3D
# Get centroids
xs, ys, zs = (StackSet.Unit & self).fetch('munit_x', 'munit_y', 'munit_z')
# Plot
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
ax.scatter(xs, ys, zs, alpha=0.5)
ax.invert_zaxis()
ax.set_xlabel('x (um)')
ax.set_ylabel('y (um)')
ax.set_zlabel('z (um)')
return fig
@schema
class Area(dj.Computed):
definition = """ # transform area masks from annotated retinotopic maps into stack space
-> PreprocessedStack.proj(stack_session='session',stack_channel='channel')
-> experiment.Scan.proj(scan_session='session')
-> shared.Channel.proj(scan_channel='channel')
-> shared.RegistrationMethod
-> shared.AreaMaskMethod
ret_idx : smallint # retinotopy map index for each animal
ret_hash : varchar(32) # single attribute representation of the key (used to avoid going over 16 attributes in the key)
---
"""
class Mask(dj.Part):
definition = """ # mask per area indicating membership
-> master
-> anatomy.Area
---
mask : blob # 2D mask of pixel area membership
"""
@property
def key_source(self):
# anatomy code outputs masks per field for aim 2pScan and per concatenated plane for aim widefield
map_rel = (anatomy.AreaMask.proj('ret_idx', scan_session='session') &
(experiment.Scan & 'aim="2pScan"').proj(scan_session='session'))
stack_rel = Registration & 'registration_method = 5'
heading = list(set(list(map_rel.heading.attributes) + list(stack_rel.heading.attributes)))
heading.remove('field')
heading.remove('brain_area')
key_source = dj.U(*heading, 'mask_method') & (map_rel * stack_rel * shared.AreaMaskMethod)
return key_source
def make(self, key):
from scipy.interpolate import griddata
import cv2
        # same as key_source but retains the brain_area attribute
        key['ret_hash'] = key_hash(key)
        map_rel = (anatomy.AreaMask.proj('ret_idx', scan_session='session') &
                   (experiment.Scan & 'aim="2pScan"').proj(scan_session='session'))
stack_rel = Registration & 'registration_method = 5'
heading = list(set(list(map_rel.heading.attributes) + list(stack_rel.heading.attributes)))
heading.remove('field')
area_keys = (dj.U(*heading, 'mask_method') & (map_rel * stack_rel * shared.AreaMaskMethod) & key).fetch('KEY')
fetch_str = ['x', 'y', 'um_width', 'um_height', 'px_width', 'px_height']
stack_rel = CorrectedStack.proj(*fetch_str, stack_session='session') & key
cent_x, cent_y, um_w, um_h, px_w, px_h = stack_rel.fetch1(*fetch_str)
# subtract edges so that all coordinates are relative to the field
stack_edges = np.array((cent_x - um_w / 2, cent_y - um_h / 2))
stack_px_dims = np.array((px_w, px_h))
stack_um_dims = np.array((um_w, um_h))
# 0.5 displacement returns the center of each pixel
stack_px_grid = np.meshgrid(*[np.arange(d) + 0.5 for d in stack_px_dims])
# for each area, transfer mask from all fields into the stack
area_masks = []
for area_key in area_keys:
mask_rel = anatomy.AreaMask & area_key
field_keys, masks = mask_rel.fetch('KEY', 'mask')
stack_masks = []
for field_key, field_mask in zip(field_keys, masks):
field_res = (meso.ScanInfo.Field & field_key).microns_per_pixel
grid_key = {**key, 'field': field_key['field']}
# fetch transformation grid using built in function
field2stack_um = (Registration & grid_key).get_grid(type='affine', desired_res=field_res)
field2stack_um = (field2stack_um[..., :2]).transpose([2, 0, 1])
# convert transformation grid into stack pixel space
field2stack_px = [(grid - edge) * px_per_um for grid, edge, px_per_um
in zip(field2stack_um, stack_edges, stack_px_dims / stack_um_dims)]
grid_locs = np.array([f2s.ravel() for f2s in field2stack_px]).T
grid_vals = field_mask.ravel()
grid_query = np.array([stack_grid.ravel() for stack_grid in stack_px_grid]).T
# griddata because scipy.interpolate.interp2d wasn't working for some reason
# linear because nearest neighbor doesn't handle nans at the edge of the image
stack_mask = griddata(grid_locs, grid_vals, grid_query, method='linear')
stack_mask = np.round(np.reshape(stack_mask, (px_h, px_w)))
stack_masks.append(stack_mask)
# flatten all masks for area
stack_masks = np.array(stack_masks)
stack_masks[np.isnan(stack_masks)] = 0
area_mask = np.max(stack_masks, axis=0)
# close gaps in mask with 100 um kernel
kernel_width = 100
kernel = np.ones(np.round(kernel_width * (stack_px_dims / stack_um_dims)).astype(int))
area_mask = cv2.morphologyEx(area_mask, cv2.MORPH_CLOSE, kernel)
area_masks.append(area_mask)
# locate areas where masks overlap and set to nan
overlap_locs = np.sum(area_masks, axis=0) > 1
# create reference map of non-overlapping area masks
mod_masks = np.stack(area_masks.copy())
mod_masks[:, overlap_locs] = np.nan
ref_mask = np.max([mm * (i + 1) for i, mm in enumerate(mod_masks)], axis=0)
# interpolate overlap pixels into reference mask
non_nan_idx = np.invert(np.isnan(ref_mask))
grid_locs = np.array([stack_grid[non_nan_idx].ravel() for stack_grid in stack_px_grid]).T
grid_vals = ref_mask[non_nan_idx].ravel()
grid_query = np.array([stack_grid[overlap_locs] for stack_grid in stack_px_grid]).T
mask_assignments = griddata(grid_locs, grid_vals, grid_query, method='nearest')
for loc, assignment in zip((np.array(grid_query) - 0.5).astype(int), mask_assignments):
mod_masks[:, loc[1], loc[0]] = 0
mod_masks[int(assignment - 1)][loc[1]][loc[0]] = 1
area_keys = [{**area_key,**key,'mask': mod_mask} for area_key, mod_mask in zip(area_keys, mod_masks)]
self.insert1(key)
self.Mask.insert(area_keys)
| lgpl-3.0 |
sunil07t/e-mission-server | emission/analysis/result/metrics/simple_metrics.py | 1 | 1587 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import numpy as np
import logging
import pandas as pd
def get_summary_fn(key):
summary_fn_map = {
"count": get_count,
"distance": get_distance,
"duration": get_duration,
"median_speed": get_median_speed
}
return summary_fn_map[key]
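# Illustrative usage (the grouped dataframe below is assumed, not part of this module):
#   grouped = sections_df.groupby('sensed_mode')
#   get_summary_fn('distance')(grouped)  # -> {'WALKING': 1234.5, 'BICYCLING': 6789.0, ...}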
def get_count(mode_section_grouped_df):
ret_dict = {}
for (mode, mode_section_df) in mode_section_grouped_df:
ret_dict[mode] = len(mode_section_df)
return ret_dict
def get_distance(mode_section_grouped_df):
ret_dict = {}
for (mode, mode_section_df) in mode_section_grouped_df:
ret_dict[mode] = mode_section_df.distance.sum()
return ret_dict
def get_duration(mode_section_grouped_df):
ret_dict = {}
for (mode, mode_section_df) in mode_section_grouped_df:
ret_dict[mode] = mode_section_df.duration.sum()
return ret_dict
def get_median_speed(mode_section_grouped_df):
ret_dict = {}
for (mode, mode_section_df) in mode_section_grouped_df:
median_speeds = [pd.Series(sl).dropna().median() for sl
in mode_section_df.speeds]
mode_median = pd.Series(median_speeds).dropna().median()
if np.isnan(mode_median):
logging.debug("still found nan for mode %s, skipping")
else:
ret_dict[mode] = mode_median
return ret_dict
| bsd-3-clause |
wiheto/expyriment_psychology_experiments | visualsearch/plot_visualsearch.py | 1 | 1920 | #This file takes the results in "results.csv" and plots them, saving it as plot.png
#Must be run in visualsearch directory
#Just run "python plot_visualsearch.py" in terminal and you should get a figure automatically generated (with a few warnings printed)
#Last updated December 5, 2016, by William H Thompson
import pandas as pd
import matplotlib.pyplot as plt
#load results
results=pd.read_csv('./results.csv')
results=results[['Tpresent','featuretypes','featureN','rt']]
#create and plot new figure
fig,ax = plt.subplots(1)
#Sort the results. (This is not the best way to do it, but fine for now)
popout_present=results.where(results.Tpresent==1).where(results.featuretypes==1).dropna().sort_values('featureN').groupby('featureN').mean()
popout_absent=results.where(results.Tpresent==0).where(results.featuretypes==1).dropna().sort_values('featureN').groupby('featureN').mean()
search_present=results.where(results.Tpresent==1).where(results.featuretypes==3).dropna().sort_values('featureN').groupby('featureN').mean()
search_absent=results.where(results.Tpresent==0).where(results.featuretypes==3).dropna().sort_values('featureN').groupby('featureN').mean()
#Plot the results. Change color and linestyle if wanted.
pp=ax.plot(popout_present.index.values,popout_present.rt,color='red')
pa=ax.plot(popout_absent.index.values,popout_absent.rt,color='red',linestyle='--')
sp=ax.plot(search_present.index.values,search_present.rt,color='blue')
sa=ax.plot(search_absent.index.values,search_absent.rt,color='blue',linestyle='--')
#Change labels x and y axis (if wanted)
ax.set_xlabel('Number of items')
ax.set_ylabel('Reaction Time (ms)')
#Change the x and y axis limits (if wanted)
ax.set_ylim(0,5000)
ax.set_xlim(0,60)
#Change figure legend (if wanted)
ax.legend((pp[0],pa[0],sp[0],sa[0]),('Pop Out (T present)','1 distractor (T absent)', '3 distractors (T present)', '3 distractors (T absent)'))
#Save figure.
fig.savefig('./plot.png')
| mit |
airbnb/superset | superset/tasks/alerts/observer.py | 1 | 3274 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from datetime import datetime
from typing import Optional
import pandas as pd
from sqlalchemy.orm import Session
from superset import jinja_context
from superset.models.alerts import Alert, SQLObservation
logger = logging.getLogger("tasks.email_reports")
# Session needs to be passed along in the celery workers and db.session cannot be used.
# For more info see: https://github.com/apache/incubator-superset/issues/10530
def observe(alert_id: int, session: Session) -> Optional[str]:
"""
Runs the SQL query in an alert's SQLObserver and then
stores the result in a SQLObservation.
Returns an error message if the observer value was not valid
"""
alert = session.query(Alert).filter_by(id=alert_id).one()
sql_observer = alert.sql_observer[0]
value = None
tp = jinja_context.get_template_processor(database=sql_observer.database)
rendered_sql = tp.process_template(sql_observer.sql)
df = sql_observer.database.get_df(rendered_sql)
error_msg = validate_observer_result(df, alert.id, alert.label)
if not error_msg and not df.empty and df.to_records()[0][1] is not None:
value = float(df.to_records()[0][1])
observation = SQLObservation(
observer_id=sql_observer.id,
alert_id=alert_id,
dttm=datetime.utcnow(),
value=value,
error_msg=error_msg,
)
session.add(observation)
session.commit()
return error_msg
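# Rough usage sketch (hedged; the session is assumed to be a SQLAlchemy session bound to the
# metadata database, as in the celery workers mentioned above):
#   error = observe(alert_id=1, session=session)
#   # error is None -> a SQLObservation row with the observed float value was stored;
#   # otherwise the row records the error message and a null value.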
def validate_observer_result(
sql_result: pd.DataFrame, alert_id: int, alert_label: str
) -> Optional[str]:
"""
Verifies if a DataFrame SQL query result to see if
it contains a valid value for a SQLObservation.
Returns an error message if the result is invalid.
"""
try:
if sql_result.empty:
# empty results are used for the not null validator
return None
rows = sql_result.to_records()
assert (
len(rows) == 1
), f"Observer for alert <{alert_id}:{alert_label}> returned more than 1 row"
assert (
len(rows[0]) == 2
), f"Observer for alert <{alert_id}:{alert_label}> returned more than 1 column"
if rows[0][1] is None:
return None
float(rows[0][1])
except AssertionError as error:
return str(error)
except (TypeError, ValueError):
return (
f"Observer for alert <{alert_id}:{alert_label}> returned a non-number value"
)
return None
| apache-2.0 |
ptkool/spark | python/pyspark/testing/sqlutils.py | 9 | 7813 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, UserDefinedType, Row
from pyspark.testing.utils import ReusedPySparkTestCase
from pyspark.util import _exception_message
pandas_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = _exception_message(e)
pyarrow_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = _exception_message(e)
test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
test_not_compiled_message = _exception_message(e)
have_pandas = pandas_requirement_message is None
have_pyarrow = pyarrow_requirement_message is None
test_compiled = test_not_compiled_message is None
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
This util assumes the instance of this to have 'spark' attribute, having a spark session.
It is usually used with 'ReusedSQLTestCase' class but can be used if you feel sure the
the implementation of this class has 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
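    # Illustrative use from a test (the config key/value are an assumed example):
    #   with self.sql_conf({"spark.sql.shuffle.partitions": "4"}):
    #       ...  # code under test runs with the overridden conf
    #   # on exit the previous value (or unset state) is restored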
@contextmanager
def database(self, *databases):
"""
A convenient context manager to test with some specific databases. This drops the given
databases if it exists and sets current database to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
A convenient context manager to test with some specific tables. This drops the given tables
if it exists.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
A convenient context manager to test with some specific views. This drops the given views
if it exists.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
A convenient context manager to test with some specific functions. This drops the given
functions if it exists.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super(ReusedSQLTestCase, cls).setUpClass()
cls.spark = SparkSession(cls.sc)
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
| apache-2.0 |
RationalAsh/pattern_recognition_assignments | assignment-3/python_sol.py | 1 | 5908 | #!/usr/bin/python
import numpy as np
from numpy import dot, random
import matplotlib.pyplot as plt
import time
from scipy.io import loadmat
def softmax(x):
exps = np.nan_to_num(np.exp(x))
return exps/np.nan_to_num(np.sum(exps))
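# Note: a numerically stabler variant (a common alternative, not what this file uses) subtracts
# the max before exponentiating, e.g.:
#   exps = np.exp(x - np.max(x)); return exps / np.sum(exps)
# which avoids the overflow that np.nan_to_num is papering over here.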
class classifier(object):
def __init__(self, input_size, classes, debug=True):
'''Initialize the classifier with the input vector size
and the number of classes required'''
self.input_size = input_size
self.classes = classes
self.W = random.randn(input_size, classes)
self.b = random.randn(classes, 1)
self.DEBUG = debug
self.cost_over_time = np.zeros(100)
    def setDebug(self, lev=True):
        self.DEBUG = lev
    def getCostOverTime(self):
        return self.cost_over_time
def Y(self, train_data):
        '''The model that predicts the class of the input vectors using
        the current parameters.'''
a = dot(train_data, self.W) + np.tile(self.b.flatten(), (len(train_data), 1))
return np.array([softmax(x) for x in a])
def costf(self, train_data, train_targets):
        '''train_data should contain the training inputs and
        train_targets the target vectors. Evaluates the cross-entropy cost
        with the current set of data and parameters.'''
Y = self.Y(train_data)
J = -sum([dot(t, ly) for t,ly in zip(train_targets, np.nan_to_num(np.log(np.nan_to_num(Y))))])
return J
def grad_costf(self, train_data, train_targets):
'''Computes the gradient of the cost function for a batch. This one was hell
to calculate by hand but I did it.'''
Y = self.Y(train_data)
gradW = dot(train_data.T, (Y - train_targets))
gradb = np.reshape(np.sum(Y - train_targets, axis=0), (self.classes, 1))
return gradW, gradb
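    # Derivation sketch for the gradients above: with softmax outputs Y and one-hot targets T,
    # dJ/dA = Y - T for the pre-softmax activations A = XW + b, so
    #   dJ/dW = X^T (Y - T)                       (input_size x classes)
    #   dJ/db = sum over the batch of (Y - T)     (classes x 1 after reshaping)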
def GD(self, train_data, train_targets, epochs=30, eta=0.01):
'''Trains the classifier using gradient descent. Uses the entire
dataset for a single epoch. Maybe I\'ll implement the stochastic
version soon.'''
#Reserve the array
self.cost_over_time = np.zeros(epochs)
#Start the training
for i in range(epochs):
print("Training Epoch %d..."%(i))
gradW, gradb = self.grad_costf(train_data, train_targets)
self.W = self.W - eta*gradW
self.b = self.b - eta*gradb
if self.DEBUG:
cost = self.costf(train_data, train_targets)
self.cost_over_time[i] = cost
print("Cost: "+str(cost))
print("Done")
def SGD(self, train_data, train_targets, batch_size=10, epochs=30, eta=0.01):
'''Trains the data using stochastic gradient descent.'''
self.cost_over_time = np.zeros(epochs)
for i in range(epochs):
print("Training Epoch %d..."%(i))
#Split the data into mini batches
NROWS = train_data.shape[0]
ROWS = [n for n in range(NROWS)]
random.shuffle(ROWS)
batches = [ROWS[n:n+batch_size] for n in range(0,NROWS,batch_size)]
for batch in batches:
#Compute the gradient for the mini batches
gradW, gradb = self.grad_costf(train_data[batch,:], train_targets[batch,:])
#Do gradient descent for each of the mini batches
self.W = self.W - eta*gradW
self.b = self.b - eta*gradb
if self.DEBUG:
cost = self.costf(train_data, train_targets)
self.cost_over_time[i] = cost
print("Cost: "+str(cost))
print("Done")
def evalData(self, test_data, test_targets):
'''Takes the testing data and calculates the number of
incorrectly classified inputs'''
Y = self.Y(test_data)
TOTAL = test_data.shape[0]
corrects = np.array(np.argmax(Y, axis=1) == np.argmax(test_targets, axis=1), dtype=float)
pcor = 100*sum(corrects)/TOTAL
print("Percentage correctly Classified: "+str(pcor))
if __name__ == '__main__':
#Extract the data from the file and prep it
DATA = loadmat('TRAINTEST2D.mat')
CL1 = DATA['TRAIN'][0][0][0][0].T
CL2 = DATA['TRAIN'][0][0][0][1].T
CL3 = DATA['TRAIN'][0][0][0][2].T
CL4 = DATA['TRAIN'][0][0][0][3].T
t1 = np.tile([1,0,0,0], (CL1.shape[0],1))
t2 = np.tile([0,1,0,0], (CL2.shape[0],1))
t3 = np.tile([0,0,1,0], (CL3.shape[0],1))
t4 = np.tile([0,0,0,1], (CL4.shape[0],1))
X = np.vstack((CL1,CL2,CL3,CL4))
T = np.vstack((t1,t2,t3,t4))
#Plot the unclassified data
plt.scatter(CL1[:,0], CL1[:,1], marker='x', c='r')
plt.scatter(CL2[:,0], CL2[:,1], marker='o', c='g')
plt.scatter(CL3[:,0], CL3[:,1], marker='s', c='b')
plt.scatter(CL4[:,0], CL4[:,1], marker='^', c='y')
plt.title('Scatter plot of the raw data')
#Initialize the classifier
clf = classifier(2, 4)
#Train the classifier
clf.SGD(X, T, epochs=30, eta=0.1)
#Get the testing data
# CL1 = DATA['TEST'][0][0][0][0].T
# CL2 = DATA['TEST'][0][0][0][1].T
# CL3 = DATA['TEST'][0][0][0][2].T
# CL4 = DATA['TEST'][0][0][0][3].T
#Trying to visualize the decision boundary
h = 0.05
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = np.argmax(clf.Y(np.c_[xx.ravel(), yy.ravel()]), axis=1)
# Put the result into a color plot
plt.figure()
plt.title('Plot of the decision boundaries')
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.argmax(T,axis=1), cmap=plt.cm.Paired)
plt.show()
| mit |
ArnaudKOPP/TransCellAssay | TransCellAssay/Core/Replica.py | 1 | 15986 | # coding=utf-8
"""
Replica implements the notion of a technical replica for a plate; in practice, it represents one plate
"""
import pandas as pd
import os
import numpy as np
import TransCellAssay as TCA
from TransCellAssay.Core.GenericPlate import GenericPlate
import logging
log = logging.getLogger(__name__)
__author__ = "Arnaud KOPP"
__copyright__ = "© 2014-2017 KOPP Arnaud All Rights Reserved"
__credits__ = ["KOPP Arnaud"]
__license__ = "GPLv3"
__maintainer__ = "Arnaud KOPP"
__email__ = "[email protected]"
class Replica(GenericPlate):
"""
    Class for manipulating a replica of a plate; inherits all attributes and methods from GenericPlate
    self.df                           # raw data stored as a pandas DataFrame
"""
def __init__(self, name, fpath, FlatFile=True, skip=(), datatype='mean', **kwargs):
"""
Constructor
:param name: name of replica
:param fpath: Data for replica object
        :param FlatFile: if True -> data are a csv file where each line is a cell
        :param skip: wells to skip
        :param datatype: 'median' or 'mean' data
        :param kwargs: extra arguments passed to pandas.read_csv when fpath is a file path
"""
super(Replica, self).__init__(name=name, datatype=datatype, skip=skip)
log.debug('Replica created : {}'.format(name))
if not FlatFile:
self.set_data(fpath)
self.__file = None
else:
if isinstance(fpath, str):
if os.path.isfile(fpath):
log.info('Reading FlatFile : %s' % fpath)
self.df = pd.read_csv(fpath, engine='c', **kwargs)
self.__file = fpath
else:
                    raise IOError('File doesn\'t exist')
elif isinstance(fpath, TCA.InputFile):
if fpath.dataframe is not None:
self.df = fpath.dataframe
self.__file = fpath.get_file_path()
else:
raise ValueError('Empty Input File')
elif isinstance(fpath, pd.DataFrame):
self.df = fpath
else:
raise NotImplementedError('Input types not handled')
self.__CACHING_gbdata = None
self.__CACHING_gbdata_key = None
def set_name(self, name):
"""
Set name of replica
"""
if name is not None:
self.name = name
def get_channels_list(self):
"""
Get all channels/component in list
:return: list of channel/component
"""
if self.df is not None:
return self.df.columns.tolist()
else:
raise IOError('Empty rawdata')
def set_rawdata(self, df):
"""
Set data in replica
:param df: csv file
"""
assert isinstance(df, pd.DataFrame)
self.df = df
def get_valid_well(self, to_check):
"""
        :param to_check: list of wells to check against the skip list; only non-skipped wells are returned
"""
if len(self.skip_well) < 1:
return to_check
if len(to_check) > 0:
# type check
elem = to_check[0]
if isinstance(elem, tuple):
tmp = [x for x in to_check if x not in self.skip_well]
return tmp
if isinstance(elem, str):
tmp = [x for x in to_check if TCA.get_opposite_well_format(x) not in self.skip_well]
return tmp
else:
raise ValueError('Empty List')
def df_to_array(self, chan, rowId='Row', colId='Columm'):
"""
To use only with 1data/well Raw data !!
:param chan: on which channel to work
        :param rowId: name of the column that holds the row index
        :param colId: name of the column that holds the column index
:return:
"""
log.warning("Only to use with 1Data/Well Raw data")
size = len(self.df)
if size <= 96:
array = np.zeros((8, 12))
elif size <= 384:
array = np.zeros((16, 24))
elif size > 384:
array = np.zeros((32, 48))
log.warning('1536 well plate size')
for i in range(size):
array[self.df[rowId][i]][self.df[colId][i]] = self.df[chan][i]
return array
def get_unique_well(self):
"""
return all unique wells
:return:
"""
if self.df is None:
raise IOError('Empty rawdata')
return self.df[self.WellKey].unique()
def get_rawdata(self, channel=None, well=None, well_idx=False):
"""
Get Raw data with specified param
:param channel: defined or not channel
:param well: defined or not which well you want, in list [] or simple string format
:param well_idx: add or not well id
:return: raw data in pandas dataframe
"""
if self.df is None:
raise IOError('Empty rawdata')
# # add well to columns that we want
# # check valid channel
if channel is not None and channel not in self.get_channels_list():
raise ValueError('Wrong Channel')
if well_idx:
if not isinstance(channel, list):
channel = [channel]
channel.insert(0, 'Well')
# # init a empty list
data = list()
# # if well not a list -> become a list
if well is not None:
if not isinstance(well, list):
well = [well]
            # check that every requested well actually exists in the raw data
            if not all(w in self.get_unique_well() for w in well):
                raise ValueError('Wrong Well')
# # Grab data
if well is not None:
for i in well:
try:
x = self.__get_Well_group(i, channel)
data.append(x)
except:
pass
# # return wells data for channel
return pd.concat(data)
else:
# # return channel data for all well
return self.df
def compute_data_channel(self, channel, datatype='mean'):
"""
Compute data in matrix form and fill it in .array var, get mean or median for well and save them in
replica object
:param channel: which channel to keep in matrix
:param datatype: mean or median
:return:
"""
if self._array_channel != channel:
log.debug('Overwriting previous channel data from {0} to {1}'.format(
self._array_channel, channel))
self.array = self.__compute_data_channel(channel=channel, type_mean=datatype)
self.datatype = datatype
self._array_channel = channel
def __compute_data_channel(self, channel, type_mean='mean', defsize=None):
"""
Compute mean or median for each well in matrix format
:param channel: Which channel to get
:param type_mean: Mean or median
:param defsize: you can set the size of plate if you want
:return:
"""
if self.df is None:
raise IOError('Empty rawdata')
gbdata = self.get_groupby_data()
        if type_mean == 'median':
            tmp = gbdata.median()
        elif type_mean == 'mean':
tmp = gbdata.mean()
channel_val = tmp[channel]
position_value_dict = channel_val.to_dict() # # dict : key = pos and item are mean
size = len(position_value_dict)
if defsize is None:
data = self.__init_array(size)
else:
data = self.__init_array(defsize)
for key, elem in position_value_dict.items():
try:
pos = TCA.get_opposite_well_format(key)
data[pos[0]][pos[1]] = elem
except IndexError:
return self.get_data_channel(channel)
return data
@staticmethod
def __init_array(size):
if size <= 96:
return np.zeros((8, 12))
elif size <= 384:
return np.zeros((16, 24))
else:
            return np.zeros((32, 48))  # 1536-well plate layout (32 rows x 48 columns)
def get_mean_channels(self):
"""
Compute for all channels the mean for each wells
:return: mean for each wells for all channels
"""
tmp = self.get_groupby_data()
return tmp.mean().reset_index()
def get_median_channels(self):
"""
Compute for all channels the median for each wells
:return: median for each wells for all channels
"""
tmp = self.get_groupby_data()
return tmp.median().reset_index()
def get_data_channel(self, channel, sec=False):
"""
Return data in matrix form, get mean or median for well
:param channel: which channel to keep in matrix
:param sec: want Systematic Error Corrected data ? default=False
:return: compute data in matrix form
"""
if sec:
if self.array_c is None:
raise ValueError('Process Systematic Error Correction method before')
else:
return self.array_c
if self.array is None:
self.compute_data_channel(channel)
if channel is self._array_channel:
return self.array
else:
self.compute_data_channel(channel)
return self.array
def get_count(self):
"""
Get the count for all well
:return:
"""
gb_data = self.get_groupby_data()
cnt = gb_data[self.WellKey].count().to_frame()
cnt.columns = ['Count_'+str(self.name)]
cnt = cnt.fillna(0)
return cnt
def __normalization(self, channel, method='Zscore', log_t=True, neg=None, pos=None, skipping_wells=False,
threshold=None):
"""
Performed normalization on data
        :param channel: which channel to normalize
:param method: Performed X Transformation
:param log_t: Performed log2 Transformation
:param pos: positive control
:param neg: negative control
:param skipping_wells: skip defined wells, use it with poc and npi
:param threshold: used in background subtraction (median is 50) you can set as you want
"""
        if self.isNormalized:
log.warning("RawData are already normalized on some channel")
log.debug('Replica {} : RawData normalization on channel {}'.format(self.name, channel))
if skipping_wells:
negative = [x for x in neg if (TCA.get_opposite_well_format(x) not in self.skip_well)]
positive = [x for x in pos if (TCA.get_opposite_well_format(x) not in self.skip_well)]
else:
negative = neg
positive = pos
TCA.rawdata_variability_normalization(self,
channel=channel,
method=method,
log2_transf=log_t,
neg_control=negative,
pos_control=positive,
threshold=threshold)
self.compute_data_channel(channel)
def normalization_channels(self, channels, method='Zscore', log_t=True, neg=None, pos=None, skipping_wells=False,
threshold=None):
"""
        Apply a normalization method to one or several channels
:param pos: positive control
:param neg: negative control
:param channels: channel to normalize
:param method: which method to perform
:param log_t: Performed log2 Transformation
:param skipping_wells: skip defined wells, use it with poc and npi
:param threshold: used in background subtraction (median is 50) you can set as you want
"""
if isinstance(channels, str):
self.__normalization(channel=channels, method=method, log_t=log_t, neg=neg, pos=pos,
skipping_wells=skipping_wells, threshold=threshold)
elif isinstance(channels, list):
for chan in channels:
self.__normalization(channel=chan, method=method, log_t=log_t, neg=neg, pos=pos,
skipping_wells=skipping_wells, threshold=threshold)
log.warning("Choose your channels that you want to work with plate.agg_data_from_replica_channel or "
"replica.data_for_channel")
self.isNormalized = True
self.RawDataNormMethod = method
def write_rawdata(self, path, name=None, **kwargs):
"""
Save normalized Raw data
:param name: Give name to file
:param path: Where to write .csv file
"""
if not os.path.isdir(path):
os.mkdir(path)
if name is None:
name = self.name
try:
self.df.to_csv(os.path.join(path, name), **kwargs)
except Exception as e:
log.error("Writing Raw data problem : {}".format(e))
def write_data(self, path, channel, sec=False):
"""
Write array
:param path:
:param channel:
:param sec:
:return:
"""
self.compute_data_channel(channel=channel)
if sec:
np.savetxt(fname=os.path.join(path, str(self.name)+'_'+str(channel)) + ".csv",
X=self.array_c, delimiter=",", fmt='%1.4f')
else:
np.savetxt(fname=os.path.join(path, str(self.name)+'_'+str(channel)) + ".csv",
X=self.array, delimiter=",", fmt='%1.4f')
def get_file_location(self):
"""
return file location from data
:return:
"""
return self.__file
def clear_cache(self):
"""
Remove some data for saving memory
"""
self.__CACHING_gbdata = None
log.debug('Cache cleared')
def get_groupby_data(self):
"""
        Perform a groupby on raw data; the result is cached to avoid recomputation if the groupby was already performed
:return:
"""
if self.__CACHING_gbdata is not None:
return self.__CACHING_gbdata
else:
self._new_caching()
return self.__CACHING_gbdata
def _new_caching(self):
self.__CACHING_gbdata = self.df.groupby(self.WellKey)
log.debug('Created {} cache'.format(self.name))
def __get_Well_group(self, Well, channel=None):
"""
Get all data for a well
:param channel:
:param Well:
:return:
"""
if self.__CACHING_gbdata is None:
self._new_caching()
if channel is not None:
return self.__CACHING_gbdata.get_group(Well)[channel]
else:
return self.__CACHING_gbdata.get_group(Well)
def remove_wells_data(self, wells):
"""
Remove wells from rawdata
        :param wells: list of wells
"""
for well in wells:
self.df = self.df[self.df[self.WellKey] != well]
def add_wells_data(self, data):
"""
        Append raw data to the internal DataFrame
"""
try:
self.df = self.df.append(data)
except Exception as e:
print(e)
def __iter__(self):
"""
iterate on group with groups key
"""
if self.__CACHING_gbdata is None:
self._new_caching()
for key, value in self.__CACHING_gbdata.groups.items():
yield key, self.df.iloc[value, :]
def __repr__(self):
"""
Definition for the representation
"""
return ("\nReplica ID : " + repr(self.name) +
"\nData normalized : " + repr(self.isNormalized) +
"\nData systematic error removed : " + repr(self.isSpatialNormalized) +
"\nRawData File location :"+repr(self.__file) +
"\n" + repr(self.df.head()) + "\n")
def __str__(self):
"""
Definition for the print
"""
return self.__repr__()
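# Editor's illustrative sketch (an assumption, not part of TransCellAssay): build a
# Replica from an in-memory DataFrame and compute per-well means for one channel.
# It assumes the well-identifier column expected by GenericPlate (self.WellKey) is
# named 'Well' and that TCA.get_opposite_well_format maps names like 'A1' to
# (row, column) indices; the wells and values below are toy data.
if __name__ == "__main__":
    toy = pd.DataFrame({'Well': ['A1', 'A1', 'A2', 'A2'],
                        'Channel1': [1.0, 3.0, 2.0, 4.0]})
    rep = Replica(name='rep1', fpath=toy)
    rep.compute_data_channel('Channel1', datatype='mean')
    print(rep.get_data_channel('Channel1'))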
| gpl-3.0 |
meduz/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
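# Editor's illustrative sketch (not part of scikit-learn): comparing a toy set of
# two biclusters against itself, which should give a perfect consensus score of 1.
# Each set is a (rows, columns) pair of boolean indicator arrays with one row per
# bicluster, as described in the docstring above.
if __name__ == "__main__":
    toy_rows = np.array([[True, True, False, False],
                         [False, False, True, True]])
    toy_cols = np.array([[True, False, True, False],
                         [False, True, False, True]])
    print(consensus_score((toy_rows, toy_cols), (toy_rows, toy_cols)))  # 1.0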
| bsd-3-clause |
christinahedges/PyKE | pyke/keptrim.py | 2 | 15381 | from .utils import PyKEArgumentHelpFormatter
import time
# urllib.urlopen was removed in Python 3; use urllib.request.urlopen instead
from urllib.request import urlopen
import sys
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from . import kepio
from . import kepmsg
from . import kepkey
__all__ = ['keptrim']
def keptrim(infile, column, row, imsize, outfile=None, kepid=None,
overwrite=False, verbose=False, logfile='keptrim.log'):
"""
keptrim -- trim pixels from Target Pixel Files
keptrim will extract a square-shaped series of sub-images from a Target
Pixel File. The simple purpose of this task is to reduce the size of large
data sets such as the superstamps or two-wheel engineering data for the
sake of processing efficiency. Performing a keptrim step speeds up
calculations such as kepprfphot considertably and provides manual
convenience for tasks such as kepmask.
Parameters
----------
infile : str
Filename for the input Target Pixel File.
column : int
The CCD column number on which to center the output subimage.
row : int
The CCD row number on which to center the output subimage.
imsize : int
The pixel size of the subimage along either the row or column
dimension. The subimage will be square.
outfile : str
Filename for the output Target Pixel File. This product will be written
to the same FITS format as archived light curves.
kepid : None or int
If the target is catalogued within the Kepler Input Catalog (KIC), then
the pixel row and column location will be extracted from the KIC
provided the Kepler ID is provided. The user must be online for this
feature to execute. If provided kepid will override column and row.
overwrite : bool
Overwrite the output file?
verbose : bool
Option for verbose mode, in which informative messages and warnings to
the shell and a logfile.
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ keptrim ktwo251248961-c112_lpd-targ.fits 14 770 --imsize 3
--overwrite --verbose
.. image:: ../_static/images/api/keptrim.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPTRIM -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' column={}'.format(column)
+ ' row={}'.format(row)
+ ' imsize={}'.format(imsize)
+ ' kepid={}'.format(kepid)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPTRIM started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = 'ERROR -- KEPTRIM: {} exists. Use --overwrite'.format(outfile)
kepmsg.err(logfile, errmsg, verbose)
# open input file
instr = pyfits.open(infile, mode='readonly', memmap=True)
cards0 = instr[0].header.cards
cards1 = instr[1].header.cards
cards2 = instr[2].header.cards
# fudge non-compliant FITS keywords with no values
instr = kepkey.emptykeys(instr, infile, logfile, verbose)
# identify the season of observation
try:
season = cards0['SEASON'].value
except:
season = 0
# retrieve column and row from KIC
try:
kic = FOVKepID(str(kepid))
column = int(kic[98 + season * 5])
row = int(kic[97 + season * 5])
except:
pass
# convert CCD column and row to image column and row
if imsize % 2 == 0:
imsize += 1
crpix1p = cards2['CRPIX1P'].value
crpix2p = cards2['CRPIX2P'].value
crval1p = cards2['CRVAL1P'].value
crval2p = cards2['CRVAL2P'].value
cdelt1p = cards2['CDELT1P'].value
cdelt2p = cards2['CDELT2P'].value
imcol = (column - crval1p) * cdelt1p + crpix1p - 1
imrow = (row - crval2p) * cdelt2p + crpix2p - 1
crval1p = column - imsize / 2 + 0.5
crval2p = row - imsize / 2 + 0.5
# check subimage is contained inside the input image
naxis1 = cards2['NAXIS1'].value
naxis2 = cards2['NAXIS2'].value
x1 = int(imcol - imsize // 2 + 0.5)
x2 = x1 + imsize
y1 = int(imrow - imsize // 2 + 0.5)
y2 = y1 + imsize
if x1 < 0 or y1 < 0 or x2 > naxis1 or y2 > naxis2:
errmsg = ('ERROR -- KEPTRIM: Requested pixel area falls outside of '
'the pixel image in file {}. Make the pixel area smaller '
'or relocate it''s center.'.format(infile))
kepmsg.err(logfile, errmsg, verbose)
# time series data
time = instr[1].data.field('TIME')[:]
timecorr = instr[1].data.field('TIMECORR')[:]
cadenceno = instr[1].data.field('CADENCENO')[:]
raw_cnts = instr[1].data.field('RAW_CNTS')[:]
flux = instr[1].data.field('FLUX')[:]
flux_err = instr[1].data.field('FLUX_ERR')[:]
flux_bkg = instr[1].data.field('FLUX_BKG')[:]
flux_bkg_err = instr[1].data.field('FLUX_BKG_ERR')[:]
cosmic_rays = instr[1].data.field('COSMIC_RAYS')[:]
quality = instr[1].data.field('QUALITY')[:]
pos_corr1 = instr[1].data.field('POS_CORR1')[:]
pos_corr2 = instr[1].data.field('POS_CORR2')[:]
# resize time series
raw_cnts = raw_cnts[:, y1:y2, x1:x2]
flux = flux[:, y1:y2, x1:x2]
flux_err = flux_err[:, y1:y2, x1:x2]
flux_bkg = flux_bkg[:, y1:y2, x1:x2]
flux_bkg_err = flux_bkg_err[:, y1:y2, x1:x2]
cosmic_rays = cosmic_rays[:, y1:y2, x1:x2]
# reshape time series images
isize = np.shape(flux)[0]
jsize = np.shape(flux)[1]
ksize = np.shape(flux)[2]
raw_cnts = np.reshape(raw_cnts, (isize, jsize * ksize))
flux = np.reshape(flux, (isize, jsize * ksize))
flux_err = np.reshape(flux_err, (isize, jsize * ksize))
flux_bkg = np.reshape(flux_bkg, (isize, jsize * ksize))
flux_bkg_err = np.reshape(flux_bkg_err, (isize, jsize * ksize))
cosmic_rays = np.reshape(cosmic_rays, (isize, jsize * ksize))
# pixel map data
maskmap = np.array(instr[2].data[y1:y2,x1:x2])
# construct output primary extension
hdu0 = pyfits.PrimaryHDU()
for i in range(len(cards0)):
try:
if cards0[i].keyword not in hdu0.header.keys():
hdu0.header[cards0[i].keyword] = (cards0[i].value, cards0[i].comment)
else:
hdu0.header.cards[cards0[i].keyword].comment = cards0[i].comment
except:
pass
kepkey.history(call, hdu0, outfile, logfile, verbose)
outstr = pyfits.HDUList(hdu0)
# construct output light curve extension
coldim = '(' + str(imsize) + ',' + str(imsize) + ')'
eformat = str(imsize*imsize) + 'E'
jformat = str(imsize*imsize) + 'J'
kformat = str(imsize*imsize) + 'K'
col1 = pyfits.Column(name='TIME', format='D', unit='BJD - 2454833',
array=time)
col2 = pyfits.Column(name='TIMECORR', format='E', unit='d',
array=timecorr)
col3 = pyfits.Column(name='CADENCENO', format='J', array=cadenceno)
col4 = pyfits.Column(name='RAW_CNTS', format=jformat, unit='count',
dim=coldim, array=raw_cnts)
col5 = pyfits.Column(name='FLUX', format=eformat, unit='e-/s', dim=coldim,
array=flux)
col6 = pyfits.Column(name='FLUX_ERR', format=eformat, unit='e-/s',
dim=coldim,array=flux_err)
col7 = pyfits.Column(name='FLUX_BKG', format=eformat, unit='e-/s',
dim=coldim,array=flux_bkg)
col8 = pyfits.Column(name='FLUX_BKG_ERR', format=eformat, unit='e-/s',
dim=coldim, array=flux_bkg_err)
col9 = pyfits.Column(name='COSMIC_RAYS', format=eformat,unit='e-/s',
dim=coldim, array=cosmic_rays)
col10 = pyfits.Column(name='QUALITY', format='J', array=quality)
col11 = pyfits.Column(name='POS_CORR1', format='E', unit='pixel',
array=pos_corr1)
col12 = pyfits.Column(name='POS_CORR2', format='E', unit='pixel',
array=pos_corr2)
cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8,
col9, col10, col11, col12])
hdu1 = pyfits.BinTableHDU.from_columns(cols)
for i in range(len(cards1)):
try:
if cards1[i].keyword not in hdu1.header.keys():
hdu1.header[cards1[i].keyword] = (cards1[i].value,
cards1[i].comment)
else:
hdu1.header.cards[cards1[i].keyword].comment = cards1[i].comment
except:
pass
hdu1.header['1CRV4P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV4P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX4'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX4'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV5P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV5P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX5'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX5'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV6P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV6P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX6'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX6'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV7P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV7P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX7'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX7'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV8P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV8P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX8'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX8'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV9P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV9P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX9'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX9'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
outstr.append(hdu1)
# construct output mask bitmap extension
hdu2 = pyfits.ImageHDU(maskmap)
for i in range(len(cards2)):
try:
if cards2[i].keyword not in hdu2.header.keys():
hdu2.header[cards2[i].keyword] = (cards2[i].value,
cards2[i].comment)
else:
hdu2.header.cards[cards2[i].keyword].comment = cards2[i].comment
except:
pass
hdu2.header['NAXIS1' ] = (imsize, '')
hdu2.header['NAXIS2' ] = (imsize, '')
hdu2.header['CRVAL1P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu2.header['CRVAL2P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu2.header['CRPIX1' ] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu2.header['CRPIX2' ] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
outstr.append(hdu2)
# write output file
print("Writing output file {}...".format(outfile))
outstr.writeto(outfile,checksum=True)
# close input structure
instr.close()
# end time
kepmsg.clock('KEPTRIM finished at', logfile, verbose)
def FOVKepID(id):
"""KIC retrieval based upon KepID"""
# build mast query
url = ('http://archive.stsci.edu/kepler/kepler_fov/search.php?'
'action=Search&kic_kepler_id={}'.format(id) + '&max_records=100'
'&verb=3&outputformat=CSV')
# retrieve results from MAST
out = ''
    lines = urlopen(url)
    for line in lines:
        # MAST returns bytes in Python 3; decode before the string comparisons below
        line = line.decode('utf-8').strip()
if (len(line) > 0
and 'Kepler' not in line
and 'integer' not in line
and 'no rows found' not in line):
out = line.split(',')
return out
def keptrim_main():
import argparse
parser = argparse.ArgumentParser(
description='Trim unwanted pixels from a Target Pixel File',
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input target pixel file',
type=str)
parser.add_argument('column', help='CCD column number of the target',
type=int)
parser.add_argument('row', help='CCD row number of the target', type=int)
parser.add_argument('imsize',
help=('Number of pixels to extract in both row and'
' column dimensions'), type=int)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-keptrim.'),
default=None)
parser.add_argument('--kepid', type=int,
help='Kepler ID number from the Kepler Input Catalog')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='keptrim.log', type=str)
args = parser.parse_args()
keptrim(args.infile, args.column, args.row, args.imsize, args.outfile,
args.kepid, args.overwrite, args.verbose, args.logfile)
| mit |
rohanp/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
PythonProgramming/Monte-Carlo-Simulator | montecarlo17 50 50 odds multiple test.py | 1 | 6831 | import random
import matplotlib
import matplotlib.pyplot as plt
import time
## MUST BEAT these!
'''
The question is, can we find a simple variable change where there is both
lower risk, and higher profit... and soon, is this the case across an average
of 1 million samples.
'''
lower_bust = 31.235
higher_profit = 63.208
# back to 1,000
sampleSize = 1000
startingFunds = 10000
wagerSize = 100
wagerCount = 100
'''
def rollDice():
roll = random.randint(1,100)
if roll == 100:
return False
elif roll <= 50:
return False
elif 100 > roll >= 50:
return True
'''
def rollDice():
roll = random.randint(1,100)
if roll <= 50:
return False
elif roll >= 51:
return True
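# Editor's illustrative sketch (not part of the original script): a helper that
# empirically checks rollDice() wins roughly half the time under these 50/50 odds.
# It is only defined here; nothing in the simulation below calls it.
def estimate_win_rate(trials=100000):
    wins = 0
    for _ in range(trials):
        if rollDice():
            wins += 1
    return float(wins) / trials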
def multiple_bettor(funds,initial_wager,wager_count):#,color):
#add
global multiple_busts
global multiple_profits
value = funds
wager = initial_wager
wX = []
vY = []
currentWager = 1
previousWager = 'win'
previousWagerAmount = initial_wager
while currentWager <= wager_count:
if previousWager == 'win':
if rollDice():
value += wager
wX.append(currentWager)
vY.append(value)
else:
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
multiple_busts += 1
break
elif previousWager == 'loss':
if rollDice():
#### must change the multiple ####
wager = previousWagerAmount * random_multiple
if (value - wager) <= 0:
wager = value
value += wager
wager = initial_wager
previousWager = 'win'
wX.append(currentWager)
vY.append(value)
else:
wager = previousWagerAmount * random_multiple
if (value - wager) <= 0:
wager = value
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
#change
multiple_busts += 1
break
currentWager += 1
#plt.plot(wX,vY)
#####################
if value > funds:
#change
multiple_profits+=1
def doubler_bettor(funds,initial_wager,wager_count,color):
global doubler_busts
global doubler_profits
value = funds
wager = initial_wager
wX = []
vY = []
currentWager = 1
previousWager = 'win'
previousWagerAmount = initial_wager
while currentWager <= wager_count:
if previousWager == 'win':
if rollDice():
value += wager
wX.append(currentWager)
vY.append(value)
else:
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value < 0:
currentWager += 10000000000000000
doubler_busts += 1
elif previousWager == 'loss':
if rollDice():
wager = previousWagerAmount * 2
if (value - wager) < 0:
wager = value
value += wager
wager = initial_wager
previousWager = 'win'
wX.append(currentWager)
vY.append(value)
else:
wager = previousWagerAmount * 2
if (value - wager) < 0:
wager = value
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
currentWager += 10000000000000000
doubler_busts += 1
currentWager += 1
#plt.plot(wX,vY,color)
#####################
if value > funds:
doubler_profits+=1
def simple_bettor(funds,initial_wager,wager_count,color):
global simple_busts
global simple_profits
value = funds
wager = initial_wager
wX = []
vY = []
currentWager = 1
while currentWager <= wager_count:
if rollDice():
value += wager
wX.append(currentWager)
vY.append(value)
else:
value -= wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
currentWager += 10000000000000000
simple_busts +=1
currentWager += 1
plt.plot(wX,vY,color)
if value > funds:
simple_profits+=1
x = 0
#Doubler Bettor Bust Chances: 84.1457... so anything less than this... aaaand
#Doubler Bettor Profit Chances: 15.6355 ... aaaand better than this.
while x < 10000:
######## move this stuff in here for the maths.
multiple_busts = 0.0
multiple_profits = 0.0
# now we're wanting to do 100 attempts to get a good sample #
multipleSampSize = 100000
currentSample = 1
random_multiple = random.uniform(0.1,10.0)
#random_multiple = 2.00
#print random_multiple
# adding this....
while currentSample <= multipleSampSize:
multiple_bettor(startingFunds,wagerSize,wagerCount)
#add one to sample
currentSample += 1
if ((multiple_busts/multipleSampSize)*100.00 < lower_bust) and ((multiple_profits/multipleSampSize)*100.00 > higher_profit):
print '#################################################'
print 'found a winner, the multiple was:',random_multiple
print 'Lower Bust Rate Than:',lower_bust
print 'Higher profit rate than:',higher_profit
print 'Bust Rate:',(multiple_busts/multipleSampSize)*100.00
print 'Profit Rate:',(multiple_profits/multipleSampSize)*100.00
print '#################################################'
time.sleep(5)
#plt.show()
else:
'''
print '####################################'
print 'To beat:'
print 'Lower Bust Rate Than:',lower_bust
print 'Higher profit rate than:',higher_profit
print 'Bust Rate:',(multiple_busts/multipleSampSize)*100.00
print 'Profit Rate:',(multiple_profits/multipleSampSize)*100.00
print '####################################'
'''
#clears the figure
#plt.clf()
x+=1
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/series/test_repr.py | 11 | 6147 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import sys
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, date_range, option_context)
from pandas.core.index import MultiIndex
from pandas.compat import lrange, range, u
from pandas import compat
import pandas.util.testing as tm
from .common import TestData
class TestSeriesRepr(TestData):
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second", "foo one 0",
" two 1", " three 2",
"bar one 3", " two 4",
"baz two 5", " three 6",
"qux one 7", " two 8",
" three 9", "Name: sth, dtype: int64"]
expected = "\n".join(expected)
assert repr(s) == expected
def test_name_printing(self):
# Test small Series.
s = Series([0, 1, 2])
s.name = "test"
assert "Name: test" in repr(s)
s.name = None
assert "Name:" not in repr(s)
# Test big Series (diff code path).
s = Series(lrange(0, 1000))
s.name = "test"
assert "Name: test" in repr(s)
s.name = None
assert "Name:" not in repr(s)
s = Series(index=date_range('20010101', '20020101'), name='test')
assert "Name: test" in repr(s)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'), (1, 2), ('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
assert "Name: 0" in rep_str
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
assert "Name: 0" in rep_str
ser = Series(["a\n\r\tb"], name="a\n\r\td", index=["a\n\r\tf"])
assert "\t" not in repr(ser)
assert "\r" not in repr(ser)
assert "a\n" not in repr(ser)
# with empty series (#4651)
s = Series([], dtype=np.int64, name='foo')
assert repr(s) == 'Series([], Name: foo, dtype: int64)'
s = Series([], dtype=np.int64, name=None)
assert repr(s) == 'Series([], dtype: int64)'
def test_tidy_repr(self):
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a) # should not raise exception
@tm.capture_stderr
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
# It works (with no Cython exception barf)!
repr(s)
output = sys.stderr.getvalue()
assert output == ''
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
# it works!
repr(s)
s.name = (u("\u05d0"), ) * 2
repr(s)
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# ...The return value must be a string object.
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
df = Series(data, index=index1)
        assert type(df.__repr__()) == str  # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
with pd.option_context('max_rows', None):
str(Series(range(1001))) # should not raise exception
def test_unicode_string_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
bytes(df)
else:
str(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
ts = Series(np.random.randn(len(index)), index)
repr(ts)
ts = tm.makeTimeSeries(1000)
assert repr(ts).splitlines()[-1].startswith('Freq:')
ts2 = ts.iloc[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
def test_latex_repr(self):
result = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & $\alpha$ \\
1 & b \\
2 & c \\
\bottomrule
\end{tabular}
"""
with option_context('display.latex.escape', False,
'display.latex.repr', True):
s = Series([r'$\alpha$', 'b', 'c'])
assert result == s._repr_latex_()
assert s._repr_latex_() is None
| mit |
madjelan/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
tanghaibao/jcvi | jcvi/projects/sugarcane.py | 1 | 16674 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# sugarcane.py
# projects
#
# Created by Haibao Tang on 12/02/19
# Copyright © 2019 Haibao Tang. All rights reserved.
#
import os.path as op
import sys
from random import random, sample
from itertools import groupby
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from jcvi.apps.base import OptionParser, ActionDispatcher, mkdir
from jcvi.graphics.base import normalize_axes, adjust_spines, savefig
SoColor = "#7436a4" # Purple
SsColor = "#5a8340" # Green
# Computed using prepare(), corrected with real sizes
ChrSizes = {
"SO-chr01": 148750011,
"SO-chr02": 119865146,
"SO-chr03": 103845728,
"SO-chr04": 104559946,
"SO-chr05": 93134056,
"SO-chr06": 74422021,
"SO-chr07": 81308893,
"SO-chr08": 71010813,
"SO-chr09": 86380266,
"SO-chr10": 73923121,
"SS-chr01": 114519418,
"SS-chr02": 119157314,
"SS-chr03": 85009228,
"SS-chr04": 79762909,
"SS-chr05": 90584537,
"SS-chr06": 95848354,
"SS-chr07": 83589369,
"SS-chr08": 64028871,
}
# Simulate genome composition
class Genome:
def __init__(self, name, prefix, ploidy, haploid_chromosome_count):
"""
Simulate a genome with given ploidy and haploid_chromosome_count. Example:
        >>> print(Genome("test", "pf", 2, 3))
test: pf-chr01_a,pf-chr01_b,pf-chr02_a,pf-chr02_b,pf-chr03_a,pf-chr03_b
"""
self.name = name
chromosomes = []
for i in range(haploid_chromosome_count):
chromosomes += [
f"{prefix}-chr{i + 1:02d}_{chr(ord('a') + j)}" for j in range(ploidy)
]
self.chromosomes = chromosomes
def __len__(self):
return len(self.chromosomes)
@classmethod
def make(cls, name, chromosomes):
genome = Genome(name, "", 0, 0)
genome.chromosomes = chromosomes
return genome
@property
def gamete(self):
"""Randomly generate a gamete from current genome that"""
self.chromosomes.sort()
gamete_chromosomes = []
# Check for any chromosome that have 2 identical copies, if so, we will assume disomic
# inheritance for that chromosome and always keep one and only copy
duplicate_chromosomes = []
singleton_chromosomes = []
for chromosome, chromosomes in groupby(self.chromosomes):
chromosomes = list(chromosomes)
ncopies = len(chromosomes)
duplicate_chromosomes += [chromosome] * (ncopies // 2)
if ncopies % 2 == 1:
singleton_chromosomes.append(chromosome)
# Get one copy of each duplicate chromosome first
gamete_chromosomes += duplicate_chromosomes
def prefix(x):
return x.split("_", 1)[0]
# Randomly assign the rest, singleton chromosomes
for group, chromosomes in groupby(singleton_chromosomes, key=prefix):
chromosomes = list(chromosomes)
halfN = len(chromosomes) // 2
# Odd number, e.g. 5, equal chance to be 2 or 3
if len(chromosomes) % 2 != 0 and random() < 0.5:
halfN += 1
gamete_chromosomes += sorted(sample(chromosomes, halfN))
return Genome.make(self.name + " gamete", gamete_chromosomes)
def mate_nplusn(self, name, other_genome, verbose=True):
if verbose:
print(
f"Crossing '{self.name}' x '{other_genome.name}' (n+n)", file=sys.stderr
)
f1_chromosomes = sorted(
self.gamete.chromosomes + other_genome.gamete.chromosomes
)
return Genome.make(name, f1_chromosomes)
def mate_2xnplusn(self, name, other_genome, verbose=True):
if verbose:
print(
f"Crossing '{self.name}' x '{other_genome.name}' (2xn+n)",
file=sys.stderr,
)
f1_chromosomes = sorted(
2 * self.gamete.chromosomes + other_genome.gamete.chromosomes
)
return Genome.make(name, f1_chromosomes)
def mate_2nplusn(self, name, other_genome, verbose=True):
if verbose:
print(
f"Crossing '{self.name}' x '{other_genome.name}' (2n+n)",
file=sys.stderr,
)
f1_chromosomes = sorted(self.chromosomes + other_genome.gamete.chromosomes)
return Genome.make(name, f1_chromosomes)
def __str__(self):
return self.name + ": " + ",".join(self.chromosomes)
@property
def summary(self):
def prefix(x, sep="-"):
return x.split(sep, 1)[0]
def size(chromosomes):
return sum(ChrSizes[prefix(x, sep="_")] for x in chromosomes)
# Chromosome count
total_count = 0
total_unique = 0
total_size = 0
total_so_size = 0
ans = []
for group, chromosomes in groupby(self.chromosomes, prefix):
chromosomes = list(chromosomes)
uniq_chromosomes = set(chromosomes)
group_count = len(chromosomes)
group_unique = len(uniq_chromosomes)
group_so_size = size({x for x in uniq_chromosomes if x[:2] == "SO"})
group_size = size(uniq_chromosomes)
total_count += group_count
total_unique += group_unique
total_so_size += group_so_size
total_size += group_size
ans.append((group, group_count, group_unique, group_so_size, group_size))
ans.append(("Total", total_count, total_unique, total_so_size, total_size))
return ans
def print_summary(self):
print("[SUMMARY]")
        # summary yields 5-tuples: (group, count, unique, SO size, total size)
        for group, group_count, group_unique, _, _ in self.summary:
print(f"{group}: count={group_count}, unique={group_unique}")
class GenomeSummary:
def __init__(self, SO_data, SS_data, percent_SO_data):
self.SO_data = SO_data
self.SS_data = SS_data
self.percent_SO_data = percent_SO_data
self.percent_SS_data = [100 - x for x in percent_SO_data]
def _summary(self, a, tag, precision=0):
mean, min, max = (
round(np.mean(a), precision),
round(np.min(a), precision),
round(np.max(a), precision),
)
s = f"{tag} chr: {mean:.0f}"
if min == mean and max == mean:
return s
return s + f" ({min:.0f}-{max:.0f})"
def _percent_summary(self, a, tag, precision=1):
mean, min, max = (
round(np.mean(a), precision),
round(np.min(a), precision),
round(np.max(a), precision),
)
s = f"{tag}\%: {mean:.1f}\%"
print(s)
if min == mean and max == mean:
return s
return s + f" ({min:.1f}-{max:.1f}\%)"
@property
def percent_SO_summary(self):
return self._percent_summary(self.percent_SO_data, "So")
@property
def percent_SS_summary(self):
return self._percent_summary(self.percent_SS_data, "Ss")
@property
def SO_summary(self):
return self._summary(self.SO_data, "So")
@property
def SS_summary(self):
return self._summary(self.SS_data, "Ss")
def simulate_F1(SO, SS, verbose=False):
SO_SS_F1_2xnplusn = SO.mate_2xnplusn("SOxSS F1", SS, verbose=verbose)
if verbose:
SO_SS_F1_2xnplusn.print_summary()
return SO_SS_F1_2xnplusn
def simulate_F2(SO, SS, verbose=False):
SO_SS_F1_2xnplusn = simulate_F1(SO, SS, verbose=verbose)
SO_SS_F2_nplusn = SO_SS_F1_2xnplusn.mate_nplusn(
"SOxSS F2", SO_SS_F1_2xnplusn, verbose=verbose
)
if verbose:
SO_SS_F2_nplusn.print_summary()
return SO_SS_F2_nplusn
def simulate_F1intercross(SO, SS, verbose=False):
SO_SS_F1_2xnplusn_1 = simulate_F1(SO, SS, verbose=verbose)
SO_SS_F1_2xnplusn_2 = simulate_F1(SO, SS, verbose=verbose)
SO_SS_F1intercross_nplusn = SO_SS_F1_2xnplusn_1.mate_nplusn(
"SOxSS F1 intercross", SO_SS_F1_2xnplusn_2, verbose=verbose
)
return SO_SS_F1intercross_nplusn
def simulate_BCn(n, SO, SS, verbose=False):
SS_SO_F1_2xnplusn = simulate_F1(SO, SS, verbose=verbose)
SS_SO_BC1_2xnplusn, SS_SO_BC2_nplusn, SS_SO_BC3_nplusn, SS_SO_BC4_nplusn = (
None,
None,
None,
None,
)
# BC1
if n >= 1:
SS_SO_BC1_2xnplusn = SO.mate_2xnplusn(
"SSxSO BC1", SS_SO_F1_2xnplusn, verbose=verbose
)
# BC2
if n >= 2:
SS_SO_BC2_nplusn = SO.mate_nplusn(
"SSxSO BC2", SS_SO_BC1_2xnplusn, verbose=verbose
)
# BC3
if n >= 3:
SS_SO_BC3_nplusn = SO.mate_nplusn(
"SSxSO BC3", SS_SO_BC2_nplusn, verbose=verbose
)
# BC4
if n >= 4:
SS_SO_BC4_nplusn = SO.mate_nplusn(
"SSxSO BC4", SS_SO_BC3_nplusn, verbose=verbose
)
return [
None,
SS_SO_BC1_2xnplusn,
SS_SO_BC2_nplusn,
SS_SO_BC3_nplusn,
SS_SO_BC4_nplusn,
][n]
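def example_BC1_summary():
    """Editor's illustrative sketch (not one of this module's command-line actions):
    simulate a single SSxSO BC1 genome with the same parent setup used in
    simulate() below and print its per-group chromosome counts. Nothing in the
    module calls this function.
    """
    SS = Genome("SS", "SS", 10, 8)
    SO = Genome("SO", "SO", 8, 10)
    bc1 = simulate_BCn(1, SO, SS, verbose=False)
    bc1.print_summary()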
def plot_summary(ax, samples):
"""Plot the distribution of chromosome numbers given simulated samples.
Args:
ax (Axes): Matplotlib axes.
samples ([Genome]): Summarized genomes.
Returns:
GenomeSummary: Summary statistics of simulated genomes.
"""
SO_data = []
SS_data = []
percent_SO_data = []
for sample in samples:
summary = sample.summary
try:
_, _, group_unique, _, _ = [x for x in summary if x[0] == "SO"][0]
except:
group_unique = 0
SO_data.append(group_unique)
try:
_, _, group_unique, _, _ = [x for x in summary if x[0] == "SS"][0]
except:
group_unique = 0
SS_data.append(group_unique)
total_tag, _, _, total_so_size, total_size = summary[-1]
assert total_tag == "Total"
percent_SO = total_so_size * 100.0 / total_size
percent_SO_data.append(percent_SO)
shift = 0.5 # used to offset bars a bit to avoid cluttering
x, y = zip(*sorted(Counter(SS_data).items()))
ax.bar(np.array(x) - shift, y, color=SsColor, ec=SsColor)
x, y = zip(*sorted(Counter(SO_data).items()))
ax.bar(np.array(x) + shift, y, color=SoColor, ec=SoColor)
ax.set_xlim(80, 0)
ax.set_ylim(0, 500)
ax.set_yticks([])
summary = GenomeSummary(SO_data, SS_data, percent_SO_data)
# Write the stats summary within the plot
summary_style = dict(
size=9,
ha="center",
va="center",
transform=ax.transAxes,
)
ax.text(0.75, 0.85, summary.SS_summary, color=SsColor, **summary_style)
ax.text(0.75, 0.65, summary.percent_SS_summary, color=SsColor, **summary_style)
ax.text(0.25, 0.85, summary.SO_summary, color=SoColor, **summary_style)
ax.text(0.25, 0.65, summary.percent_SO_summary, color=SoColor, **summary_style)
return summary
def write_chromosomes(genomes, filename):
"""Write simulated chromosomes to file
Args:
genomes (List[Genome]): List of simulated genomes.
filename: File path to write to.
"""
print(f"Write chromosomes to `{filename}`", file=sys.stderr)
with open(filename, "w") as fw:
for genome in genomes:
print(genome, file=fw)
def simulate(args):
"""
%prog simulate
Run simulation on female restitution.
"""
import seaborn as sns
sns.set_style("darkgrid")
p = OptionParser(simulate.__doc__)
p.add_option(
"--verbose",
default=False,
action="store_true",
help="Verbose logging during simulation",
)
opts, args, iopts = p.set_image_options(args, figsize="6x6")
if len(args) != 0:
sys.exit(not p.print_help())
# Construct a composite figure with 6 tracks
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
rows = 6
ypad = 0.05
yinterval = (1 - 2 * ypad) / (rows + 1)
yy = 1 - ypad
xpad = 0.2
xwidth = 0.7
# Axes are vertically stacked, and share x-axis
axes = []
    yy_positions = []  # Save yy positions so we can show details to the right later
for idx in range(rows):
yy_positions.append(yy)
yy -= yinterval
ax = fig.add_axes([xpad, yy, xwidth, yinterval * 0.85])
if idx != rows - 1:
plt.setp(ax.get_xticklabels(), visible=False)
axes.append(ax)
ax1, ax2, ax3, ax4, ax5, ax6 = axes
# Prepare the simulated data
# Simulate two parents
SS = Genome("SS", "SS", 10, 8)
SO = Genome("SO", "SO", 8, 10)
verbose = opts.verbose
all_F1s = [simulate_F1(SO, SS, verbose=verbose) for _ in range(1000)]
all_F2s = [simulate_F2(SO, SS, verbose=verbose) for _ in range(1000)]
all_BC1s = [simulate_BCn(1, SO, SS, verbose=verbose) for _ in range(1000)]
all_BC2s = [simulate_BCn(2, SO, SS, verbose=verbose) for _ in range(1000)]
all_BC3s = [simulate_BCn(3, SO, SS, verbose=verbose) for _ in range(1000)]
all_BC4s = [simulate_BCn(4, SO, SS, verbose=verbose) for _ in range(1000)]
# Plotting
plot_summary(ax1, all_F1s)
plot_summary(ax2, all_F2s)
plot_summary(ax3, all_BC1s)
plot_summary(ax4, all_BC2s)
plot_summary(ax5, all_BC3s)
plot_summary(ax6, all_BC4s)
# Show title to the left
xx = xpad / 2
for (title, subtitle), yy in zip(
(
("F1", None),
("F2", None),
("BC1", None),
("BC2", None),
("BC3", None),
("BC4", None),
),
yy_positions,
):
if subtitle:
yy -= 0.06
else:
yy -= 0.07
root.text(
xx,
yy,
title,
color="darkslategray",
ha="center",
va="center",
fontweight="semibold",
)
if subtitle:
yy -= 0.02
root.text(
xx, yy, subtitle, color="lightslategray", ha="center", va="center"
)
axes[-1].set_xlabel("Number of unique chromosomes")
adjust_spines(axes[-1], ["bottom"], outward=True)
normalize_axes(root)
savefig("plotter.pdf", dpi=120)
outdir = "simulations"
mkdir(outdir)
# Write chromosomes to disk
for genomes, filename in (
(all_F1s, "all_F1s"),
(all_F2s, "all_F2s"),
(all_BC1s, "all_BC1s"),
(all_BC2s, "all_BC2s"),
(all_BC3s, "all_BC3s"),
(all_BC4s, "all_BC4s"),
):
write_chromosomes(genomes, op.join(outdir, filename))
def _get_sizes(filename, prefix_length, tag, target_size=None):
"""Returns a dictionary of chromome lengths from a given file.
Args:
filename ([str]): Path to the input file. Input file is 2-column file
with rows `seqid length`.
prefix_length (int): Extract first N characters.
tag (str): Prepend `tag-` to the seqid.
target_size (int): Expected genome size. Defaults to None.
"""
from collections import defaultdict
sizes_list = defaultdict(list)
with open(filename) as fp:
for row in fp:
if not row.startswith("Chr"):
continue
name, size = row.split()
idx = int(name[3:prefix_length])
size = int(size)
name = f"{tag}-chr{idx:02d}"
sizes_list[name].append(size)
# Get the average length
sizes = dict(
(name, int(round(np.mean(size_list)))) for name, size_list in sizes_list.items()
)
print(sizes)
if target_size is None:
return sizes
total_size = sum(sizes.values())
correction_factor = target_size / total_size
print(
f"{tag} total:{total_size} target:{target_size} correction:{correction_factor:.2f}x"
)
return dict(
(name, int(round(correction_factor * size))) for name, size in sizes.items()
)
def prepare(args):
"""
%prog SoChrLen.txt SsChrLen.txt
Calculate lengths from real sugarcane data.
"""
p = OptionParser(prepare.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
solist, sslist = args
# The haploid set of LA Purple is 957.2 Mb and haploid set of US56-14-4 is 732.5 Mb
sizes = _get_sizes(solist, 5, "SO", target_size=957.2 * 1e6)
sizes.update(_get_sizes(sslist, 4, "SS", target_size=732.5 * 1e6))
print(sizes)
def main():
actions = (
("prepare", "Calculate lengths from real sugarcane data"),
("simulate", "Run simulation on female restitution"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
if __name__ == "__main__":
main()
| bsd-2-clause |
LevinJ/SSD_tensorflow_VOC | exercise/readfromtfrecords_batch_eval.py | 1 | 3147 | from datasets import dataset_factory
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
from nets import nets_factory
from preprocessing import preprocessing_factory
import numpy as np
class ReadRecordsBatchEval(object):
def __init__(self):
self.dataset_name = 'flowers'
self.dataset_split_name = 'validation'
self.dataset_dir = '/home/levin/workspace/detection/data/flower'
self.batch_size = 32
self.labels_offset = 0
self.eval_image_size = None
self.preprocessing_name = None
self.model_name = 'inception_v3'
self.num_preprocessing_threads = 4
return
def disp_image(self,image, label):
plt.figure()
plt.imshow(image)
plt.axis('off')
plt.title(str(label))
plt.show()
return
def __get_images_labels(self):
dataset = dataset_factory.get_dataset(
self.dataset_name, self.dataset_split_name, self.dataset_dir)
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
shuffle=False,
common_queue_capacity=2 * self.batch_size,
common_queue_min=self.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= self.labels_offset
network_fn = nets_factory.get_network_fn(
self.model_name,
num_classes=(dataset.num_classes - self.labels_offset),
is_training=False)
preprocessing_name = self.preprocessing_name or self.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=False)
eval_image_size = self.eval_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=self.batch_size,
num_threads=self.num_preprocessing_threads,
capacity=5 * self.batch_size)
return images, labels
def run(self):
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
print("create symbolic op")
images, labels = self.__get_images_labels()
with tf.Session('') as sess:
init = tf.global_variables_initializer()
sess.run(init)
with slim.queues.QueueRunners(sess):
for _ in range(2):
images_data, labels_data = sess.run([images, labels])
images_data = images_data[0]
labels_data = labels_data[0]
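# Added note (an assumption inferred from the rescaling below): the inception
# preprocessing used above appears to map pixel values into [-1, 1], so the
# next line undoes that to recover a displayable uint8 image.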
images_data = ((images_data/2 + 0.5)*255).astype(np.uint8)
self.disp_image(images_data, labels_data)
return
if __name__ == "__main__":
obj= ReadRecordsBatchEval()
obj.run() | apache-2.0 |
Rbeaty88/ginga | ginga/mplw/transform.py | 1 | 10643 | #
# transform.py -- a custom projection for supporting matplotlib plotting
# on ginga
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
# NOTE: this code is based on "custom_projection_example.py", an example
# script developed by matplotlib developers
#
import matplotlib
from matplotlib.axes import Axes
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, BboxTransformTo, Transform, \
blended_transform_factory
from matplotlib.projections import register_projection
import numpy as np
class GingaAxes(Axes):
"""
This is a custom matplotlib projection to support matplotlib plotting
on a ginga-rendered image in a matplotlib Figure.
This code is based on 'custom_projection_example.py', an example
script developed by matplotlib developers.
"""
# The projection must specify a name. This will be used by the
# user to select the projection, i.e. ``subplot(111,
# projection='ginga')``.
name = 'ginga'
def __init__(self, *args, **kwargs):
# this is the Ginga object
self.fitsimage = kwargs.pop('fitsimage', None)
Axes.__init__(self, *args, **kwargs)
## self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def set_fitsimage(self, fitsimage):
self.fitsimage = fitsimage
self.transData.fitsimage = fitsimage
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# There are three important coordinate spaces going on here:
#
# 1. Data space: The space of the data itself
#
# 2. Axes space: The unit rectangle (0, 0) to (1, 1)
# covering the entire plot area.
#
# 3. Display space: The coordinates of the resulting image,
# often in pixels or dpi/inch.
# This function makes heavy use of the Transform classes in
# ``lib/matplotlib/transforms.py.`` For more information, see
# the inline documentation there.
# The goal of the first two transformations is to get from the
# data space to axes space. It is separated into a non-affine
# and affine part so that the non-affine part does not have to be
# recomputed when a simple affine change to the figure has been
# made (such as resizing the window or changing the dpi).
# 3) This is the transformation from axes space to display
# space.
self.transAxes = BboxTransformTo(self.bbox)
# Now put these 3 transforms together -- from data all the way
# to display coordinates. Using the '+' operator, these
# transforms will be applied "in order". The transforms are
# automatically simplified, if possible, by the underlying
# transformation framework.
#self.transData = \
# self.transProjection + self.transAffine + self.transAxes
self.transData = self.GingaTransform()
self.transData.fitsimage = self.fitsimage
# self._xaxis_transform = blended_transform_factory(
# self.transData, self.transAxes)
# self._yaxis_transform = blended_transform_factory(
# self.transAxes, self.transData)
self._xaxis_transform = self.transData
self._yaxis_transform = self.transData
# Prevent the user from applying scales to one or both of the
# axes. In this particular case, scaling the axes wouldn't make
# sense, so we don't allow it.
def set_xscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_xscale(self, *args, **kwargs)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_yscale(self, *args, **kwargs)
# Prevent the user from changing the axes limits. This also
# applies to interactive panning and zooming in the GUI interfaces.
## def set_xlim(self, *args, **kwargs):
## print "Setting xlim!", args
## def set_ylim(self, *args, **kwargs):
## print "Setting ylim!", args
def format_coord(self, x, y):
"""
Override this method to change how the values are displayed in
the status bar.
"""
return 'x=%f, y=%f' % (x, y)
def get_data_ratio(self):
"""
Return the aspect ratio of the data itself.
This method should be overridden by any Axes that have a
fixed data ratio.
"""
return 1.0
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
# TODO: get zoom box working
return False
def can_pan(self):
"""
Return True if this axes supports panning
"""
return True
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
bd = self.fitsimage.get_bindings()
data_x, data_y = self.fitsimage.get_data_xy(x, y)
bd.ms_pan(self.fitsimage, 'down', data_x, data_y)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
bd = self.fitsimage.get_bindings()
data_x, data_y = self.fitsimage.get_last_data_xy()
bd.ms_pan(self.fitsimage, 'up', data_x, data_y)
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
bd = self.fitsimage.get_bindings()
data_x, data_y = self.fitsimage.get_data_xy(x, y)
bd.ms_pan(self.fitsimage, 'move', data_x, data_y)
# Now, the transforms themselves.
class GingaTransform(Transform):
"""
The base Ginga transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
fitsimage = None
#pass_through = True
def invalidate(self):
print "I don't feel validated! (%s)" % (self.pass_through)
return Transform.invalidate(self)
def _transform_xy(self, n):
#win_wd, win_ht = self.fitsimage.get_window_size()
win_x, win_y = self.fitsimage.get_canvas_xy(n[0], n[1])
#return float(win_x) / win_wd, float(win_y) / win_ht
return (win_x, win_y)
def transform_non_affine(self, xy):
"""
Override the transform_non_affine method to implement the custom
transform.
The input and output are Nx2 numpy arrays.
"""
#print "transform in:", xy
if self.fitsimage == None:
return xy
res = np.array(map(self._transform_xy, xy))
#print "transform out:", res
return res
# This is where things get interesting. With this projection,
# straight lines in data space become curves in display space.
# This is done by interpolating new values between the input
# values of the data. Since ``transform`` must not return a
# differently-sized array, any transform that requires
# changing the length of the data array must happen within
# ``transform_path``.
def transform_path_non_affine(self, path):
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = \
Transform.transform_path_non_affine.__doc__
if matplotlib.__version__ < '1.2':
# Note: For compatibility with matplotlib v1.1 and older, you'll
# need to explicitly implement a ``transform`` method as well.
# Otherwise a ``NotImplementedError`` will be raised. This isn't
# necessary for v1.2 and newer, however.
transform = transform_non_affine
# Similarly, we need to explicitly override ``transform_path`` if
# compatibility with older matplotlib versions is needed. With v1.2
# and newer, only overriding the ``transform_path_non_affine``
# method is sufficient.
transform_path = transform_path_non_affine
transform_path.__doc__ = Transform.transform_path.__doc__
def inverted(self):
tform = GingaAxes.InvertedGingaTransform()
tform.fitsimage = self.fitsimage
return tform
inverted.__doc__ = Transform.inverted.__doc__
class InvertedGingaTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
fitsimage = None
def _transform_xy(self, n):
return self.fitsimage.get_data_xy(n[0], n[1])
def transform_non_affine(self, xy):
#print "transform in:", xy
if self.fitsimage == None:
return xy
res = np.array(map(self._transform_xy, xy))
#print "transform out:", res
return res
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
# As before, we need to implement the "transform" method for
# compatibility with matplotlib v1.1 and older.
if matplotlib.__version__ < '1.2':
transform = transform_non_affine
def inverted(self):
# The inverse of the inverse is the original transform... ;)
tform = GingaAxes.GingaTransform()
tform.fitsimage = self.fitsimage
return tform
inverted.__doc__ = Transform.inverted.__doc__
# Now register the projection with matplotlib so the user can select
# it.
register_projection(GingaAxes)
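# Illustrative sketch (not part of the original module): once registered, the
# projection is selected by name as described in the comments above. The
# `fitsimage` argument is assumed to be an already-constructed ginga image
# viewer; no particular widget class is implied.
def _example_ginga_projection_usage(fitsimage):
    import matplotlib.pyplot as plt
    ax = plt.subplot(111, projection='ginga', fitsimage=fitsimage)
    # Coordinates given to ordinary matplotlib calls are now interpreted as
    # ginga data (image pixel) coordinates by GingaTransform.
    ax.plot([10, 200], [10, 200], 'g-')
    return ax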
#END
| bsd-3-clause |
Vishluck/sympy | sympy/physics/quantum/tests/test_circuitplot.py | 93 | 2065 | from sympy.physics.quantum.circuitplot import labeller, render_label, Mz, CreateOneQubitGate,\
CreateCGate
from sympy.physics.quantum.gate import CNOT, H, SWAP, CGate, S, T
from sympy.external import import_module
from sympy.utilities.pytest import skip
mpl = import_module('matplotlib')
def test_render_label():
assert render_label('q0') == r'$|q0\rangle$'
assert render_label('q0', {'q0': '0'}) == r'$|q0\rangle=|0\rangle$'
def test_Mz():
assert str(Mz(0)) == 'Mz(0)'
def test_create1():
Qgate = CreateOneQubitGate('Q')
assert str(Qgate(0)) == 'Q(0)'
def test_createc():
Qgate = CreateCGate('Q')
assert str(Qgate([1],0)) == 'C((1),Q(0))'
def test_labeller():
"""Test the labeller utility"""
assert labeller(2) == ['q_1', 'q_0']
assert labeller(3,'j') == ['j_2', 'j_1', 'j_0']
def test_cnot():
"""Test a simple cnot circuit. Right now this only makes sure the code doesn't
raise an exception, and checks some simple properties
"""
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
c = CircuitPlot(CNOT(1,0),2)
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == []
def test_ex1():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0)*H(1),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
def test_ex4():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(SWAP(0,2)*H(0)* CGate((0,),S(1)) *H(1)*CGate((0,),T(2))\
*CGate((1,),S(2))*H(2),3,labels=labeller(3,'j'))
assert c.ngates == 7
assert c.nqubits == 3
assert c.labels == ['j_2', 'j_1', 'j_0']
| bsd-3-clause |
yanlend/scikit-learn | sklearn/manifold/t_sne.py | 52 | 34602 | # Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
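# Illustrative sketch (not part of scikit-learn): the condensed-distance
# contract described in the docstring above. The random data is only a
# placeholder to show the expected shapes.
def _example_joint_probabilities():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    distances = pdist(X, "sqeuclidean")   # condensed: n * (n - 1) / 2 entries
    P = _joint_probabilities(distances, desired_perplexity=5.0, verbose=0)
    return P                              # condensed joint probabilities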
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
is O(N^2), but limiting the joint probability to the nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
neighbors = astype(neighbors, np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
m = "All probabilities should be finite"
assert np.all(np.isfinite(conditional_P)), m
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
assert np.all(np.abs(P) <= 1.0)
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(skip_num_points, n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
n_components):
"""t-SNE objective function: the absolute error of the
KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors : array (n_samples, K)
The neighbors is not actually required to calculate the
divergence, but is here to match the signature of the
gradient function
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if len(P.shape) == 2:
P = squareform(P)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
n_components, angle=0.5, skip_num_points=0,
verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors: int64 array, shape (n_samples, K)
Array with element [i, j] giving the index for the jth
closest neighbor to point i.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = astype(params, np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
neighbors = astype(neighbors, np.int64, copy=False)
if len(P.shape) == 1:
sP = squareform(P).astype(np.float32)
else:
sP = P.astype(np.float32)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
objective_error : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
n_iter_without_progress : int, optional (default: 50)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
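# Illustrative sketch (not part of scikit-learn): the (cost, gradient)
# contract that `objective` must satisfy, shown on a toy quadratic. The
# learning rate is chosen for this toy problem, not for t-SNE.
def _example_gradient_descent():
    def quadratic(p):
        return np.sum(p ** 2), 2.0 * p    # cost and its gradient
    p0 = np.array([3.0, -2.0])
    p, error, it = _gradient_descent(quadratic, p0, it=0, n_iter=100,
                                     momentum=0.5, learning_rate=0.1)
    return p, error, it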
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
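# Illustrative sketch (not part of scikit-learn): trustworthiness compares a
# dataset with its low-dimensional embedding. The data below is random and
# the "embedding" is just the first two coordinates, purely to show the
# calling convention.
def _example_trustworthiness():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    X_embedded = X[:, :2]
    return trustworthiness(X, X_embedded, n_neighbors=5)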
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater than 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if init not in ["pca", "random"] or isinstance(init, np.ndarray):
msg = "'init' must be 'pca', 'random' or a NumPy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.embedding_ = None
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
X = check_array(X, dtype=np.float32)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
opt_args = {}
opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = 0.0
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(obj_func, params, **opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Save the final number of iterations
self.n_iter_final = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
def _check_fitted(self):
if self.embedding_ is None:
raise ValueError("Cannot call `transform` unless `fit` has"
"already been called")
| bsd-3-clause |
abhisg/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, which will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
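# Illustrative sketch (not part of the original example): the minimal
# out-of-core contract described in the module docstring -- hash every text
# batch into the same fixed feature space and update the model with
# partial_fit. The tiny in-memory "stream" below is a placeholder, not
# Reuters data.
def _minimal_out_of_core_sketch():
    vec = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                            non_negative=True)
    clf = SGDClassifier()
    batches = [(["good movie", "bad movie"], np.array([1, 0])),
               (["great film", "awful film"], np.array([1, 0]))]
    for texts, y in batches:
        X = vec.transform(texts)          # same feature space for every batch
        clf.partial_fit(X, y, classes=np.array([0, 1]))
    return clf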
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batchs of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
antmd/graph-tool | doc/conf.py | 3 | 8055 | # -*- coding: utf-8 -*-
#
# graph-tool documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 26 18:29:16 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'mathjax', 'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.extlinks',
'sphinx.ext.viewcode'
#'sphinx.ext.linkcode'
#'matplotlib.sphinxext.plot_directive'
]
mathjax_path = "MathJax/MathJax.js?config=default"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'graph-tool'
copyright = u'2015, Tiago de Paula Peixoto <[email protected]>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from graph_tool import __version__ as gt_version
version = gt_version.split()[0]
# The full version, including alpha/beta/rc tags.
release = gt_version.split()[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# doctest
doctest_global_setup = open("pyenv.py").read()
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'default.css'
html_theme = "gt_theme"
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "graph-icon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'http://graph-tool.skewed.de/doc/'
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'graph-tooldoc'
# Options for LaTeX output
# ------------------------
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'graph-tool.tex', ur'graph-tool documentation',
ur'Tiago de Paula Peixoto', 'manual'),
]
latex_preamble = """
\setcounter{tocdepth}{2}
"""
latex_show_pagerefs = True
latex_show_urls = False
latex_paper_size = "a4"
latex_logo = "blockmodel.pdf"
latex_elements = {
'papersize': "a4paper",
'fontpkg': r"\usepackage{bookman}"}
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'cairo': ('http://www.cairographics.org/documentation/pycairo/3/', None),
'ipython': ('http://ipython.org/ipython-doc/stable/', None),
'panda': ('http://pandas.pydata.org/pandas-docs/stable/', None)}
extlinks = {'ticket': ('http://graph-tool.skewed.de/tickets/ticket/%s',
'ticket '),
'doi': ('http://dx.doi.org/%s', 'DOI: '),
'arxiv': ('http://arxiv.org/abs/%s', 'arXiv: ')}
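# Illustrative usage of the extlinks roles above in the reST sources (the
# reference targets below are hypothetical, not taken from the actual docs):
#
#   :ticket:`123`          -> http://graph-tool.skewed.de/tickets/ticket/123
#   :doi:`10.1000/xyz123`  -> http://dx.doi.org/10.1000/xyz123
#   :arxiv:`1310.4378`     -> http://arxiv.org/abs/1310.4378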
# def process_docstring(app, what, name, obj, options, lines):
# for i, line in enumerate(lines):
# if "arg1" in line and "->" in line:
# lines[i] = ""
# if "C++ signature :" in line or "graph_tool::Python" in line:
# lines[i] = ""
# def setup(app):
# app.connect('autodoc-process-docstring', process_docstring)
# plot directive
import pyenv
plot_rcparams = pyenv.rcParams
#plot_pre_code = open("pyenv.py").read()
autodoc_default_flags = ['members', 'undoc-members']
numpydoc_show_class_members = False
autodoc_docstring_signature = False
autodoc_member_order = 'bysource'
autoclass_content = 'both'
imported_members = True
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
modname = info['module'].replace('.', '/')
return "https://git.skewed.de/count0/graph-tool/tree/master/src/%s/__init__.py" % modname
| gpl-3.0 |
jlegendary/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
    # Not a square similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
clarkfitzg/xray | xray/core/indexing.py | 4 | 13941 | from datetime import timedelta
import numpy as np
import pandas as pd
from . import utils
from .pycompat import iteritems, range, dask_array_type
from .utils import is_full_slice
def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError('too many indices')
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key)
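# Worked examples of the expansion (illustrative only; ``ndim=3`` is an
# assumed value, not taken from a caller in this module):
#
#   expanded_indexer(0, 3)             -> (0, slice(None), slice(None))
#   expanded_indexer((Ellipsis, 0), 3) -> (slice(None), slice(None), 0)
#
# i.e. Ellipsis is replaced by as many full slices as needed and the key is
# then right-padded with full slices up to ``ndim`` entries.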
def canonicalize_indexer(key, ndim):
"""Given an indexer for orthogonal array indexing, return an indexer that
is a tuple composed entirely of slices, integer ndarrays and native python
ints.
"""
def canonicalize(indexer):
if not isinstance(indexer, slice):
indexer = np.asarray(indexer)
if indexer.ndim == 0:
indexer = int(np.asscalar(indexer))
else:
if indexer.ndim != 1:
raise ValueError('orthogonal array indexing only supports '
'1d arrays')
if indexer.dtype.kind == 'b':
indexer, = np.nonzero(indexer)
elif indexer.dtype.kind != 'i':
raise ValueError('invalid subkey %r for integer based '
'array indexing; all subkeys must be '
'slices, integers or sequences of '
'integers or Booleans' % indexer)
return indexer
return tuple(canonicalize(k) for k in expanded_indexer(key, ndim))
def _expand_slice(slice_, size):
return np.arange(*slice_.indices(size))
def orthogonal_indexer(key, shape):
"""Given a key for orthogonal array indexing, returns an equivalent key
suitable for indexing a numpy.ndarray with fancy indexing.
"""
# replace Ellipsis objects with slices
key = list(canonicalize_indexer(key, len(shape)))
# replace 1d arrays and slices with broadcast compatible arrays
# note: we treat integers separately (instead of turning them into 1d
# arrays) because integers (and only integers) collapse axes when used with
# __getitem__
non_int_keys = [n for n, k in enumerate(key) if not isinstance(k, (int, np.integer))]
def full_slices_unselected(n_list):
def all_full_slices(key_index):
return all(is_full_slice(key[n]) for n in key_index)
if not n_list:
return n_list
elif all_full_slices(range(n_list[0] + 1)):
return full_slices_unselected(n_list[1:])
elif all_full_slices(range(n_list[-1], len(key))):
return full_slices_unselected(n_list[:-1])
else:
return n_list
# However, testing suggests it is OK to keep contiguous sequences of full
# slices at the start or the end of the key. Keeping slices around (when
# possible) instead of converting slices to arrays significantly speeds up
# indexing.
# (Honestly, I don't understand when it's not OK to keep slices even in
# between integer indices if as array is somewhere in the key, but such are
# the admittedly mind-boggling ways of numpy's advanced indexing.)
array_keys = full_slices_unselected(non_int_keys)
def maybe_expand_slice(k, length):
return _expand_slice(k, length) if isinstance(k, slice) else k
array_indexers = np.ix_(*(maybe_expand_slice(key[n], shape[n])
for n in array_keys))
for i, n in enumerate(array_keys):
key[n] = array_indexers[i]
return tuple(key)
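# Sketch of how this differs from plain numpy fancy indexing (the array and
# indexers below are assumed values, not taken from a test):
#
#   x = np.arange(12).reshape(3, 4)
#   rows, cols = [0, 2], [1, 3]
#   x[rows, cols].shape                                 -> (2,)   # numpy pairs them
#   x[orthogonal_indexer((rows, cols), x.shape)].shape  -> (2, 2) # outer product
#
# because np.ix_ broadcasts each 1d indexer along its own axis independently.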
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _get_loc(index, label, method=None):
"""Backwards compatible wrapper for Index.get_loc, which only added the
method argument in pandas 0.16
"""
if method is not None:
return index.get_loc(label, method=method)
else:
return index.get_loc(label)
def convert_label_indexer(index, label, index_name='', method=None):
"""Given a pandas.Index (or xray.Coordinate) and labels (e.g., from
__getitem__) for one dimension, return an indexer suitable for indexing an
ndarray along that dimension
"""
if isinstance(label, slice):
if method is not None:
raise NotImplementedError(
'cannot yet use the ``method`` argument if any indexers are '
'slice objects')
indexer = index.slice_indexer(_try_get_item(label.start),
_try_get_item(label.stop),
_try_get_item(label.step))
else:
label = np.asarray(label)
if label.ndim == 0:
indexer = _get_loc(index, np.asscalar(label), method=method)
elif label.dtype.kind == 'b':
indexer, = np.nonzero(label)
else:
indexer = index.get_indexer(label, method=method)
if np.any(indexer < 0):
raise ValueError('not all values found in index %r'
% index_name)
return indexer
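# Illustrative behaviour (the index and labels below are hypothetical):
#
#   idx = pd.Index([10, 20, 30])
#   convert_label_indexer(idx, 20)            -> 1                 (scalar label)
#   convert_label_indexer(idx, [10, 30])      -> array([0, 2])     (label list)
#   convert_label_indexer(idx, slice(10, 20)) -> slice(0, 2, None) (label slice,
#                                                inclusive of both endpoints)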
def remap_label_indexers(data_obj, indexers, method=None):
"""Given an xray data object and label based indexers, return a mapping
of equivalent location based indexers.
"""
return dict((dim, convert_label_indexer(data_obj[dim].to_index(), label,
dim, method))
for dim, label in iteritems(indexers))
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + step
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step)
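# Worked example with assumed values: composing slice(1, None, 2) over a
# dimension of size 10 with a subsequent slice(1, 3):
#
#   _expand_slice(slice(1, None, 2), 10)            -> array([1, 3, 5, 7, 9])
#   array([1, 3, 5, 7, 9])[slice(1, 3)]             -> array([3, 5])
#   slice_slice(slice(1, None, 2), slice(1, 3), 10) -> slice(3, 7, 2)
#
# and np.arange(10)[slice(3, 7, 2)] indeed selects the same elements [3, 5].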
def _index_indexer_1d(old_indexer, applied_indexer, size):
assert isinstance(applied_indexer, (int, np.integer, slice, np.ndarray))
if isinstance(applied_indexer, slice) and applied_indexer == slice(None):
# shortcut for the usual case
return old_indexer
if isinstance(old_indexer, slice):
if isinstance(applied_indexer, slice):
indexer = slice_slice(old_indexer, applied_indexer, size)
else:
indexer = _expand_slice(old_indexer, size)[applied_indexer]
else:
indexer = old_indexer[applied_indexer]
return indexer
class LazyIntegerRange(utils.NDArrayMixin):
def __init__(self, *args, **kwdargs):
"""
Parameters
----------
See np.arange
"""
self.args = args
self.kwdargs = kwdargs
assert 'dtype' not in self.kwdargs
# range will fail if any arguments are not integers
self.array = range(*args, **kwdargs)
@property
def shape(self):
return (len(self.array),)
@property
def dtype(self):
return np.dtype('int64')
@property
def ndim(self):
return 1
@property
def size(self):
return len(self.array)
def __getitem__(self, key):
return np.array(self)[key]
def __array__(self, dtype=None):
return np.arange(*self.args, **self.kwdargs)
def __repr__(self):
return ('%s(array=%r)' %
(type(self).__name__, self.array))
class LazilyIndexedArray(utils.NDArrayMixin):
"""Wrap an array that handles orthogonal indexing to make indexing lazy
"""
def __init__(self, array, key=None):
"""
Parameters
----------
array : array_like
Array like object to index.
key : tuple, optional
Array indexer. If provided, it is assumed to already be in
canonical expanded form.
"""
if key is None:
key = (slice(None),) * array.ndim
self.array = array
self.key = key
def _updated_key(self, new_key):
new_key = iter(canonicalize_indexer(new_key, self.ndim))
key = []
for size, k in zip(self.array.shape, self.key):
if isinstance(k, (int, np.integer)):
key.append(k)
else:
key.append(_index_indexer_1d(k, next(new_key), size))
return tuple(key)
@property
def shape(self):
shape = []
for size, k in zip(self.array.shape, self.key):
if isinstance(k, slice):
shape.append(len(range(*k.indices(size))))
elif isinstance(k, np.ndarray):
shape.append(k.size)
return tuple(shape)
def __array__(self, dtype=None):
array = orthogonally_indexable(self.array)
        return np.asarray(array[self.key], dtype=dtype)
def __getitem__(self, key):
return type(self)(self.array, self._updated_key(key))
def __setitem__(self, key, value):
key = self._updated_key(key)
self.array[key] = value
def __repr__(self):
return ('%s(array=%r, key=%r)' %
(type(self).__name__, self.array, self.key))
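# Sketch of the laziness (the wrapped array below is an assumed example):
#
#   lazy = LazilyIndexedArray(np.arange(100).reshape(10, 10))
#   sub = lazy[:5][2:4]   # no data is copied yet; the keys are composed
#   sub.key               # -> (slice(2, 4, 1), slice(None, None, None))
#   np.asarray(sub)       # only now is the underlying array actually indexed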
def orthogonally_indexable(array):
if isinstance(array, np.ndarray):
return NumpyIndexingAdapter(array)
if isinstance(array, pd.Index):
return PandasIndexAdapter(array)
if isinstance(array, dask_array_type):
return DaskIndexingAdapter(array)
return array
class NumpyIndexingAdapter(utils.NDArrayMixin):
"""Wrap a NumPy array to use orthogonal indexing (array indexing
accesses different dimensions independently, like netCDF4-python variables)
"""
# note: this object is somewhat similar to biggus.NumpyArrayAdapter in that
# it implements orthogonal indexing, except it casts to a numpy array,
# isn't lazy and supports writing values.
def __init__(self, array):
self.array = np.asarray(array)
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def _convert_key(self, key):
key = expanded_indexer(key, self.ndim)
if any(not isinstance(k, (int, np.integer, slice)) for k in key):
# key would trigger fancy indexing
key = orthogonal_indexer(key, self.shape)
return key
def __getitem__(self, key):
key = self._convert_key(key)
return self.array[key]
def __setitem__(self, key, value):
key = self._convert_key(key)
self.array[key] = value
class DaskIndexingAdapter(utils.NDArrayMixin):
"""Wrap a dask array to support orthogonal indexing
"""
def __init__(self, array):
self.array = array
def __getitem__(self, key):
key = expanded_indexer(key, self.ndim)
if any(not isinstance(k, (int, np.integer, slice)) for k in key):
value = self.array
for axis, subkey in reversed(list(enumerate(key))):
value = value[(slice(None),) * axis + (subkey,)]
else:
value = self.array[key]
return value
class PandasIndexAdapter(utils.NDArrayMixin):
"""Wrap a pandas.Index to be better about preserving dtypes and to handle
indexing by length 1 tuples like numpy
"""
def __init__(self, array, dtype=None):
self.array = utils.safe_cast_to_index(array)
if dtype is None:
dtype = array.dtype
self._dtype = dtype
@property
def dtype(self):
return self._dtype
def __array__(self, dtype=None):
if dtype is None:
dtype = self.dtype
return self.array.values.astype(dtype)
def __getitem__(self, key):
if isinstance(key, tuple) and len(key) == 1:
# unpack key so it can index a pandas.Index object (pandas.Index
# objects don't like tuples)
key, = key
if isinstance(key, (int, np.integer)):
value = self.array[key]
if value is pd.NaT:
# work around the impossibility of casting NaT with asarray
# note: it probably would be better in general to return
                # pd.Timestamp rather than np.datetime64 but this is easier
# (for now)
value = np.datetime64('NaT', 'ns')
elif isinstance(value, timedelta):
value = np.timedelta64(getattr(value, 'value', value), 'ns')
else:
value = np.asarray(value, dtype=self.dtype)
else:
value = PandasIndexAdapter(self.array[key], dtype=self.dtype)
return value
def __repr__(self):
return ('%s(array=%r, dtype=%r)'
% (type(self).__name__, self.array, self.dtype))
| apache-2.0 |
almarklein/scikit-image | doc/examples/plot_local_otsu.py | 2 | 1552 | """
====================
Local Otsu Threshold
====================
This example shows how Otsu's threshold method [1]_ can be applied locally. For
each pixel, an "optimal" threshold is determined by maximizing the variance
between two classes of pixels of the local neighborhood defined by a structuring
element.
The example compares the local threshold with the global threshold.
.. note:: local is much slower than global thresholding
.. [1] http://en.wikipedia.org/wiki/Otsu's_method
"""
import matplotlib
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filter import threshold_otsu, rank
from skimage.util import img_as_ubyte
matplotlib.rcParams['font.size'] = 9
img = img_as_ubyte(data.page())
radius = 15
selem = disk(radius)
local_otsu = rank.otsu(img, selem)
threshold_global_otsu = threshold_otsu(img)
global_otsu = img >= threshold_global_otsu
plt.figure(figsize=(8, 5))
plt.subplot(2, 2, 1)
plt.imshow(img, cmap=plt.cm.gray)
plt.title('Original')
plt.colorbar(orientation='horizontal')
plt.axis('off')
plt.subplot(2, 2, 2)
plt.imshow(local_otsu, cmap=plt.cm.gray)
plt.title('Local Otsu (radius=%d)' % radius)
plt.colorbar(orientation='horizontal')
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(img >= local_otsu, cmap=plt.cm.gray)
plt.title('Original >= Local Otsu')
plt.axis('off')
plt.subplot(2, 2, 4)
plt.imshow(global_otsu, cmap=plt.cm.gray)
plt.title('Global Otsu (threshold = %d)' % threshold_global_otsu)
plt.axis('off')
plt.show()
| bsd-3-clause |
phobson/statsmodels | statsmodels/examples/ex_proportion.py | 33 | 1918 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 21 07:59:26 2013
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lmap
import numpy as np
import statsmodels.stats.proportion as sms
import statsmodels.stats.weightstats as smw
from numpy.testing import assert_almost_equal
# Region, Eyes, Hair, Count
ss = '''\
1 blue fair 23 1 blue red 7 1 blue medium 24
1 blue dark 11 1 green fair 19 1 green red 7
1 green medium 18 1 green dark 14 1 brown fair 34
1 brown red 5 1 brown medium 41 1 brown dark 40
1 brown black 3 2 blue fair 46 2 blue red 21
2 blue medium 44 2 blue dark 40 2 blue black 6
2 green fair 50 2 green red 31 2 green medium 37
2 green dark 23 2 brown fair 56 2 brown red 42
2 brown medium 53 2 brown dark 54 2 brown black 13'''
dta0 = np.array(ss.split()).reshape(-1,4)
dta = np.array(lmap(tuple, dta0.tolist()), dtype=[('Region', int), ('Eyes', 'S6'), ('Hair', 'S6'), ('Count', int)])
xfair = np.repeat([1,0], [228, 762-228])
# comparing to SAS last output at
# http://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_freq_sect028.htm
# confidence interval for tost
ci01 = smw.confint_ztest(xfair, alpha=0.1)
assert_almost_equal(ci01, [0.2719, 0.3265], 4)
res = smw.ztost(xfair, 0.18, 0.38)
assert_almost_equal(res[1][0], 7.1865, 4)
assert_almost_equal(res[2][0], -4.8701, 4)
nn = np.arange(200, 351)
pow_z = sms.power_ztost_prop(0.5, 0.72, nn, 0.6, alpha=0.05)
pow_bin = sms.power_ztost_prop(0.5, 0.72, nn, 0.6, alpha=0.05, dist='binom')
import matplotlib.pyplot as plt
plt.plot(nn, pow_z[0], label='normal')
plt.plot(nn, pow_bin[0], label='binomial')
plt.legend(loc='lower right')
plt.title('Proportion Equivalence Test: Power as function of sample size')
plt.xlabel('Number of Observations')
plt.ylabel('Power')
plt.show()
| bsd-3-clause |
shyamalschandra/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of agglomerative
clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
DVegaCapital/zipline | zipline/algorithm.py | 2 | 48191 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
import warnings
import pytz
import pandas as pd
import numpy as np
from datetime import datetime
from itertools import groupby, chain
from six.moves import filter
from six import (
exec_,
iteritems,
itervalues,
string_types,
)
from operator import attrgetter
from zipline.errors import (
AddTermPostInit,
OrderDuringInitialize,
OverrideCommissionPostInit,
OverrideSlippagePostInit,
RegisterAccountControlPostInit,
RegisterTradingControlPostInit,
UnsupportedCommissionModel,
UnsupportedOrderParameters,
UnsupportedSlippageModel,
UnsupportedDatetimeFormat,
)
from zipline.finance.trading import TradingEnvironment
from zipline.finance.blotter import Blotter
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.controls import (
LongOnly,
MaxOrderCount,
MaxOrderSize,
MaxPositionSize,
MaxLeverage,
RestrictedListOrder
)
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.performance import PerformanceTracker
from zipline.finance.slippage import (
VolumeShareSlippage,
SlippageModel,
transact_partial
)
from zipline.assets import Asset, Future
from zipline.assets.futures import FutureChain
from zipline.gens.composites import date_sorted_sources
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.modelling.engine import (
NoOpFFCEngine,
SimpleFFCEngine,
)
from zipline.sources import DataFrameSource, DataPanelSource
from zipline.utils.api_support import (
api_method,
require_not_initialized,
ZiplineAPI,
)
import zipline.utils.events
from zipline.utils.events import (
EventManager,
make_eventrule,
DateRuleFactory,
TimeRuleFactory,
)
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.math_utils import tolerant_equals
import zipline.protocol
from zipline.protocol import Event
from zipline.history import HistorySpec
from zipline.history.history_container import HistoryContainer
DEFAULT_CAPITAL_BASE = float("1.0e5")
class TradingAlgorithm(object):
"""
Base class for trading algorithms. Inherit and overload
initialize() and handle_data(data).
A new algorithm could look like this:
```
from zipline.api import order, symbol
def initialize(context):
context.sid = symbol('AAPL')
context.amount = 100
def handle_data(context, data):
sid = context.sid
amount = context.amount
order(sid, amount)
```
To then to run this algorithm pass these functions to
TradingAlgorithm:
my_algo = TradingAlgorithm(initialize, handle_data)
stats = my_algo.run(data)
"""
def __init__(self, *args, **kwargs):
"""Initialize sids and other state variables.
:Arguments:
:Optional:
initialize : function
Function that is called with a single
                argument at the beginning of the simulation.
handle_data : function
Function that is called with 2 arguments
(context and data) on every bar.
script : str
Algoscript that contains initialize and
handle_data function definition.
data_frequency : {'daily', 'minute'}
The duration of the bars.
capital_base : float <default: 1.0e5>
How much capital to start with.
instant_fill : bool <default: False>
Whether to fill orders immediately or on next bar.
asset_finder : An AssetFinder object
A new AssetFinder object to be used in this TradingEnvironment
asset_metadata: can be either:
- dict
- pandas.DataFrame
- object with 'read' property
If dict is provided, it must have the following structure:
* keys are the identifiers
* values are dicts containing the metadata, with the metadata
field name as the key
If pandas.DataFrame is provided, it must have the
following structure:
* column names must be the metadata fields
* index must be the different asset identifiers
* array contents should be the metadata value
If an object with a 'read' property is provided, 'read' must
return rows containing at least one of 'sid' or 'symbol' along
with the other metadata fields.
identifiers : List
Any asset identifiers that are not provided in the
asset_metadata, but will be traded by this TradingAlgorithm
"""
self.sources = []
# List of trading controls to be used to validate orders.
self.trading_controls = []
# List of account controls to be checked on each bar.
self.account_controls = []
self._recorded_vars = {}
self.namespace = kwargs.get('namespace', {})
self._platform = kwargs.pop('platform', 'zipline')
self.logger = None
self.benchmark_return_source = None
# default components for transact
self.slippage = VolumeShareSlippage()
self.commission = PerShare()
self.instant_fill = kwargs.pop('instant_fill', False)
# set the capital base
self.capital_base = kwargs.pop('capital_base', DEFAULT_CAPITAL_BASE)
self.sim_params = kwargs.pop('sim_params', None)
if self.sim_params is None:
self.sim_params = create_simulation_parameters(
capital_base=self.capital_base,
start=kwargs.pop('start', None),
end=kwargs.pop('end', None)
)
self.perf_tracker = PerformanceTracker(self.sim_params)
# Update the TradingEnvironment with the provided asset metadata
self.trading_environment = kwargs.pop('env',
TradingEnvironment.instance())
self.trading_environment.update_asset_finder(
asset_finder=kwargs.pop('asset_finder', None),
asset_metadata=kwargs.pop('asset_metadata', None),
identifiers=kwargs.pop('identifiers', None)
)
# Pull in the environment's new AssetFinder for quick reference
self.asset_finder = self.trading_environment.asset_finder
self.init_engine(kwargs.pop('ffc_loader', None))
# Maps from name to Term
self._filters = {}
self._factors = {}
self._classifiers = {}
self.blotter = kwargs.pop('blotter', None)
if not self.blotter:
self.blotter = Blotter()
        # Set the dt initially to the period start by forcing it to change
self.on_dt_changed(self.sim_params.period_start)
# The symbol lookup date specifies the date to use when resolving
# symbols to sids, and can be set using set_symbol_lookup_date()
self._symbol_lookup_date = None
self.portfolio_needs_update = True
self.account_needs_update = True
self.performance_needs_update = True
self._portfolio = None
self._account = None
self.history_container_class = kwargs.pop(
'history_container_class', HistoryContainer,
)
self.history_container = None
self.history_specs = {}
# If string is passed in, execute and get reference to
# functions.
self.algoscript = kwargs.pop('script', None)
self._initialize = None
self._before_trading_start = None
self._analyze = None
self.event_manager = EventManager()
if self.algoscript is not None:
filename = kwargs.pop('algo_filename', None)
if filename is None:
filename = '<string>'
code = compile(self.algoscript, filename, 'exec')
exec_(code, self.namespace)
self._initialize = self.namespace.get('initialize')
if 'handle_data' not in self.namespace:
raise ValueError('You must define a handle_data function.')
else:
self._handle_data = self.namespace['handle_data']
self._before_trading_start = \
self.namespace.get('before_trading_start')
# Optional analyze function, gets called after run
self._analyze = self.namespace.get('analyze')
elif kwargs.get('initialize') and kwargs.get('handle_data'):
if self.algoscript is not None:
raise ValueError('You can not set script and \
initialize/handle_data.')
self._initialize = kwargs.pop('initialize')
self._handle_data = kwargs.pop('handle_data')
self._before_trading_start = kwargs.pop('before_trading_start',
None)
self.event_manager.add_event(
zipline.utils.events.Event(
zipline.utils.events.Always(),
# We pass handle_data.__func__ to get the unbound method.
# We will explicitly pass the algorithm to bind it again.
self.handle_data.__func__,
),
prepend=True,
)
# If method not defined, NOOP
if self._initialize is None:
self._initialize = lambda x: None
# Alternative way of setting data_frequency for backwards
# compatibility.
if 'data_frequency' in kwargs:
self.data_frequency = kwargs.pop('data_frequency')
self._most_recent_data = None
# Prepare the algo for initialization
self.initialized = False
self.initialize_args = args
self.initialize_kwargs = kwargs
def init_engine(self, loader):
"""
Construct and save an FFCEngine from loader.
If loader is None, constructs a NoOpFFCEngine.
"""
if loader is not None:
self.engine = SimpleFFCEngine(
loader,
self.trading_environment.trading_days,
self.asset_finder,
)
else:
self.engine = NoOpFFCEngine()
def initialize(self, *args, **kwargs):
"""
Call self._initialize with `self` made available to Zipline API
functions.
"""
with ZiplineAPI(self):
self._initialize(self)
def before_trading_start(self, data):
if self._before_trading_start is None:
return
self._before_trading_start(self, data)
def handle_data(self, data):
self._most_recent_data = data
if self.history_container:
self.history_container.update(data, self.datetime)
self._handle_data(self, data)
# Unlike trading controls which remain constant unless placing an
# order, account controls can change each bar. Thus, must check
# every bar no matter if the algorithm places an order or not.
self.validate_account_controls()
def analyze(self, perf):
if self._analyze is None:
return
with ZiplineAPI(self):
self._analyze(self, perf)
def __repr__(self):
"""
N.B. this does not yet represent a string that can be used
to instantiate an exact copy of an algorithm.
However, it is getting close, and provides some value as something
that can be inspected interactively.
"""
return """
{class_name}(
capital_base={capital_base}
sim_params={sim_params},
initialized={initialized},
slippage={slippage},
commission={commission},
blotter={blotter},
recorded_vars={recorded_vars})
""".strip().format(class_name=self.__class__.__name__,
capital_base=self.capital_base,
sim_params=repr(self.sim_params),
initialized=self.initialized,
slippage=repr(self.slippage),
commission=repr(self.commission),
blotter=repr(self.blotter),
recorded_vars=repr(self.recorded_vars))
def _create_data_generator(self, source_filter, sim_params=None):
"""
Create a merged data generator using the sources attached to this
algorithm.
::source_filter:: is a method that receives events in date
sorted order, and returns True for those events that should be
processed by the zipline, and False for those that should be
skipped.
"""
if sim_params is None:
sim_params = self.sim_params
if self.benchmark_return_source is None:
if sim_params.data_frequency == 'minute' or \
sim_params.emission_rate == 'minute':
def update_time(date):
return self.trading_environment.get_open_and_close(date)[1]
else:
def update_time(date):
return date
benchmark_return_source = [
Event({'dt': update_time(dt),
'returns': ret,
'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
'source_id': 'benchmarks'})
for dt, ret in
self.trading_environment.benchmark_returns.iteritems()
if dt.date() >= sim_params.period_start.date() and
dt.date() <= sim_params.period_end.date()
]
else:
benchmark_return_source = self.benchmark_return_source
date_sorted = date_sorted_sources(*self.sources)
if source_filter:
date_sorted = filter(source_filter, date_sorted)
with_benchmarks = date_sorted_sources(benchmark_return_source,
date_sorted)
# Group together events with the same dt field. This depends on the
# events already being sorted.
return groupby(with_benchmarks, attrgetter('dt'))
def _create_generator(self, sim_params, source_filter=None):
"""
Create a basic generator setup using the sources to this algorithm.
::source_filter:: is a method that receives events in date
sorted order, and returns True for those events that should be
processed by the zipline, and False for those that should be
skipped.
"""
if not self.initialized:
self.initialize(*self.initialize_args, **self.initialize_kwargs)
self.initialized = True
if self.perf_tracker is None:
# HACK: When running with the `run` method, we set perf_tracker to
# None so that it will be overwritten here.
self.perf_tracker = PerformanceTracker(sim_params)
self.portfolio_needs_update = True
self.account_needs_update = True
self.performance_needs_update = True
self.data_gen = self._create_data_generator(source_filter, sim_params)
self.trading_client = AlgorithmSimulator(self, sim_params)
transact_method = transact_partial(self.slippage, self.commission)
self.set_transact(transact_method)
return self.trading_client.transform(self.data_gen)
def get_generator(self):
"""
Override this method to add new logic to the construction
of the generator. Overrides can use the _create_generator
method to get a standard construction generator.
"""
return self._create_generator(self.sim_params)
# TODO: make a new subclass, e.g. BatchAlgorithm, and move
# the run method to the subclass, and refactor to put the
# generator creation logic into get_generator.
def run(self, source, overwrite_sim_params=True,
benchmark_return_source=None):
"""Run the algorithm.
:Arguments:
source : can be either:
- pandas.DataFrame
- zipline source
- list of sources
If pandas.DataFrame is provided, it must have the
following structure:
* column names must be the different asset identifiers
* index must be DatetimeIndex
* array contents should be price info.
:Returns:
daily_stats : pandas.DataFrame
Daily performance metrics such as returns, alpha etc.
"""
# Ensure that source is a DataSource object
if isinstance(source, list):
if overwrite_sim_params:
warnings.warn("""List of sources passed, will not attempt to extract start and end
dates. Make sure to set the correct fields in sim_params passed to
__init__().""", UserWarning)
overwrite_sim_params = False
elif isinstance(source, pd.DataFrame):
# if DataFrame provided, map columns to sids and wrap
# in DataFrameSource
copy_frame = source.copy()
copy_frame.columns = \
self.asset_finder.map_identifier_index_to_sids(
source.columns, source.index[0]
)
source = DataFrameSource(copy_frame)
elif isinstance(source, pd.Panel):
# If Panel provided, map items to sids and wrap
# in DataPanelSource
copy_panel = source.copy()
copy_panel.items = self.asset_finder.map_identifier_index_to_sids(
source.items, source.major_axis[0]
)
source = DataPanelSource(copy_panel)
if isinstance(source, list):
self.set_sources(source)
else:
self.set_sources([source])
# Override sim_params if params are provided by the source.
if overwrite_sim_params:
if hasattr(source, 'start'):
self.sim_params.period_start = source.start
if hasattr(source, 'end'):
self.sim_params.period_end = source.end
# Changing period_start and period_close might require updating
# of first_open and last_close.
self.sim_params._update_internal()
# The sids field of the source is the reference for the universe at
# the start of the run
self._current_universe = set()
for source in self.sources:
for sid in source.sids:
self._current_universe.add(sid)
# Check that all sids from the source are accounted for in
# the AssetFinder. This retrieve call will raise an exception if the
# sid is not found.
for sid in self._current_universe:
self.asset_finder.retrieve_asset(sid)
# force a reset of the performance tracker, in case
# this is a repeat run of the algorithm.
self.perf_tracker = None
# create zipline
self.gen = self._create_generator(self.sim_params)
# Create history containers
if self.history_specs:
self.history_container = self.history_container_class(
self.history_specs,
self.current_universe(),
self.sim_params.first_open,
self.sim_params.data_frequency,
)
# loop through simulated_trading, each iteration returns a
# perf dictionary
perfs = []
for perf in self.gen:
perfs.append(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
self.analyze(daily_stats)
return daily_stats
def _create_daily_stats(self, perfs):
# create daily and cumulative stats dataframe
daily_perfs = []
# TODO: the loop here could overwrite expected properties
# of daily_perf. Could potentially raise or log a
# warning.
for perf in perfs:
if 'daily_perf' in perf:
perf['daily_perf'].update(
perf['daily_perf'].pop('recorded_vars')
)
perf['daily_perf'].update(perf['cumulative_risk_metrics'])
daily_perfs.append(perf['daily_perf'])
else:
self.risk_report = perf
daily_dts = [np.datetime64(perf['period_close'], utc=True)
for perf in daily_perfs]
daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)
return daily_stats
@api_method
def add_transform(self, transform, days=None):
"""
Ensures that the history container will have enough size to service
a simple transform.
:Arguments:
transform : string
The transform to add. must be an element of:
{'mavg', 'stddev', 'vwap', 'returns'}.
days : int <default=None>
The maximum amount of days you will want for this transform.
This is not needed for 'returns'.
"""
if transform not in {'mavg', 'stddev', 'vwap', 'returns'}:
raise ValueError('Invalid transform')
if transform == 'returns':
if days is not None:
                raise ValueError('returns does not use days')
self.add_history(2, '1d', 'price')
return
elif days is None:
raise ValueError('no number of days specified')
if self.sim_params.data_frequency == 'daily':
mult = 1
freq = '1d'
else:
mult = 390
freq = '1m'
bars = mult * days
self.add_history(bars, freq, 'price')
if transform == 'vwap':
self.add_history(bars, freq, 'volume')
@api_method
def get_environment(self, field='platform'):
env = {
'arena': self.sim_params.arena,
'data_frequency': self.sim_params.data_frequency,
'start': self.sim_params.first_open,
'end': self.sim_params.last_close,
'capital_base': self.sim_params.capital_base,
'platform': self._platform
}
if field == '*':
return env
else:
return env[field]
def add_event(self, rule=None, callback=None):
"""
Adds an event to the algorithm's EventManager.
"""
self.event_manager.add_event(
zipline.utils.events.Event(rule, callback),
)
@api_method
def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True):
"""
Schedules a function to be called with some timed rules.
"""
date_rule = date_rule or DateRuleFactory.every_day()
time_rule = ((time_rule or TimeRuleFactory.market_open())
if self.sim_params.data_frequency == 'minute' else
# If we are in daily mode the time_rule is ignored.
zipline.utils.events.Always())
self.add_event(
make_eventrule(date_rule, time_rule, half_days),
func,
)
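    # Hypothetical usage from an algorithm's initialize() (the factory names
    # are those imported at the top of this module; the callback is assumed):
    #
    #   def initialize(context):
    #       context.schedule_function(
    #           my_rebalance,
    #           date_rule=DateRuleFactory.every_day(),
    #           time_rule=TimeRuleFactory.market_open(),
    #       )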
@api_method
def record(self, *args, **kwargs):
"""
Track and record local variable (i.e. attributes) each day.
"""
# Make 2 objects both referencing the same iterator
args = [iter(args)] * 2
# Zip generates list entries by calling `next` on each iterator it
# receives. In this case the two iterators are the same object, so the
# call to next on args[0] will also advance args[1], resulting in zip
# returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.
positionals = zip(*args)
for name, value in chain(positionals, iteritems(kwargs)):
self._recorded_vars[name] = value
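    # Illustrative call (the recorded names are made up); positional
    # name/value pairs and keyword arguments both end up in recorded_vars and
    # are emitted with each daily performance packet:
    #
    #   self.record('leverage', 1.2, exposure=0.5)
    #   # -> self._recorded_vars == {'leverage': 1.2, 'exposure': 0.5}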
@api_method
def symbol(self, symbol_str):
"""
Default symbol lookup for any source that directly maps the
symbol to the Asset (e.g. yahoo finance).
"""
# If the user has not set the symbol lookup date,
        # use the period_end as the date for symbol->sid resolution.
_lookup_date = self._symbol_lookup_date if self._symbol_lookup_date is not None \
else self.sim_params.period_end
return self.asset_finder.lookup_symbol_resolve_multiple(
symbol_str,
as_of_date=_lookup_date
)
@api_method
def symbols(self, *args):
"""
Default symbols lookup for any source that directly maps the
symbol to the Asset (e.g. yahoo finance).
"""
return [self.symbol(identifier) for identifier in args]
@api_method
def sid(self, a_sid):
"""
Default sid lookup for any source that directly maps the integer sid
to the Asset.
"""
return self.asset_finder.retrieve_asset(a_sid)
@api_method
def future_chain(self, root_symbol, as_of_date=None):
""" Look up a future chain with the specified parameters.
Parameters
----------
root_symbol : str
The root symbol of a future chain.
as_of_date : datetime.datetime or pandas.Timestamp or str, optional
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this date is
the primary contract, etc.
Returns
-------
FutureChain
The future chain matching the specified parameters.
Raises
------
RootSymbolNotFound
If a future chain could not be found for the given root symbol.
"""
if as_of_date:
try:
as_of_date = pd.Timestamp(as_of_date, tz='UTC')
except ValueError:
raise UnsupportedDatetimeFormat(input=as_of_date,
method='future_chain')
return FutureChain(
asset_finder=self.asset_finder,
get_datetime=self.get_datetime,
root_symbol=root_symbol.upper(),
as_of_date=as_of_date
)
def _calculate_order_value_amount(self, asset, value):
"""
Calculates how many shares/contracts to order based on the type of
asset being ordered.
"""
last_price = self.trading_client.current_data[asset].price
if tolerant_equals(last_price, 0):
zero_message = "Price of 0 for {psid}; can't infer value".format(
psid=asset
)
if self.logger:
self.logger.debug(zero_message)
# Don't place any order
return 0
if isinstance(asset, Future):
value_multiplier = asset.contract_multiplier
else:
value_multiplier = 1
return value / (last_price * value_multiplier)
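    # Worked example with assumed prices: ordering $10,000 of an equity
    # trading at $50 gives 10000 / (50 * 1) = 200 shares, while the same value
    # for a future with contract_multiplier=10 gives 10000 / (50 * 10) = 20
    # contracts (for futures the "value" is really the exposure).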
@api_method
def order(self, sid, amount,
limit_price=None,
stop_price=None,
style=None):
"""
Place an order using the specified parameters.
"""
def round_if_near_integer(a, epsilon=1e-4):
"""
Round a to the nearest integer if that integer is within an epsilon
of a.
"""
if abs(a - round(a)) <= epsilon:
return round(a)
else:
return a
# Truncate to the integer share count that's either within .0001 of
# amount or closer to zero.
# E.g. 3.9999 -> 4.0; 5.5 -> 5.0; -5.5 -> -5.0
amount = int(round_if_near_integer(amount))
# Raises a ZiplineError if invalid parameters are detected.
self.validate_order_params(sid,
amount,
limit_price,
stop_price,
style)
# Convert deprecated limit_price and stop_price parameters to use
# ExecutionStyle objects.
style = self.__convert_order_params_for_blotter(limit_price,
stop_price,
style)
return self.blotter.order(sid, amount, style)
def validate_order_params(self,
asset,
amount,
limit_price,
stop_price,
style):
"""
Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found.
"""
if not self.initialized:
raise OrderDuringInitialize(
msg="order() can only be called from within handle_data()"
)
if style:
if limit_price:
raise UnsupportedOrderParameters(
msg="Passing both limit_price and style is not supported."
)
if stop_price:
raise UnsupportedOrderParameters(
msg="Passing both stop_price and style is not supported."
)
if not isinstance(asset, Asset):
raise UnsupportedOrderParameters(
msg="Passing non-Asset argument to 'order()' is not supported."
" Use 'sid()' or 'symbol()' methods to look up an Asset."
)
for control in self.trading_controls:
control.validate(asset,
amount,
self.updated_portfolio(),
self.get_datetime(),
self.trading_client.current_data)
@staticmethod
def __convert_order_params_for_blotter(limit_price, stop_price, style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
# TODO_SS: DeprecationWarning for usage of limit_price and stop_price.
if style:
assert (limit_price, stop_price) == (None, None)
return style
if limit_price and stop_price:
return StopLimitOrder(limit_price, stop_price)
if limit_price:
return LimitOrder(limit_price)
if stop_price:
return StopOrder(stop_price)
else:
return MarketOrder()
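    # Summary of the mapping performed above (the prices are placeholders):
    #
    #   (limit_price, stop_price) == (10.0, None) -> LimitOrder(10.0)
    #   (limit_price, stop_price) == (None, 9.0)  -> StopOrder(9.0)
    #   (limit_price, stop_price) == (10.0, 9.0)  -> StopLimitOrder(10.0, 9.0)
    #   (limit_price, stop_price) == (None, None) -> MarketOrder()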
@api_method
def order_value(self, sid, value,
limit_price=None, stop_price=None, style=None):
"""
Place an order by desired value rather than desired number of shares.
If the requested sid is found in the universe, the requested value is
divided by its price to imply the number of shares to transact.
If the Asset being ordered is a Future, the 'value' calculated
is actually the exposure, as Futures have no 'value'.
value > 0 :: Buy/Cover
value < 0 :: Sell/Short
Market order: order(sid, value)
Limit order: order(sid, value, limit_price)
Stop order: order(sid, value, None, stop_price)
StopLimit order: order(sid, value, limit_price, stop_price)
"""
amount = self._calculate_order_value_amount(sid, value)
return self.order(sid, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@property
def recorded_vars(self):
return copy(self._recorded_vars)
@property
def portfolio(self):
return self.updated_portfolio()
def updated_portfolio(self):
if self.portfolio_needs_update:
self._portfolio = \
self.perf_tracker.get_portfolio(self.performance_needs_update)
self.portfolio_needs_update = False
self.performance_needs_update = False
return self._portfolio
@property
def account(self):
return self.updated_account()
def updated_account(self):
if self.account_needs_update:
self._account = \
self.perf_tracker.get_account(self.performance_needs_update)
self.account_needs_update = False
self.performance_needs_update = False
return self._account
def set_logger(self, logger):
self.logger = logger
def on_dt_changed(self, dt):
"""
Callback triggered by the simulation loop whenever the current dt
changes.
Any logic that should happen exactly once at the start of each datetime
group should happen here.
"""
assert isinstance(dt, datetime), \
"Attempt to set algorithm's current time with non-datetime"
assert dt.tzinfo == pytz.utc, \
"Algorithm expects a utc datetime"
self.datetime = dt
self.perf_tracker.set_date(dt)
self.blotter.set_date(dt)
@api_method
def get_datetime(self, tz=None):
"""
Returns the simulation datetime.
"""
dt = self.datetime
assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
if tz is not None:
# Convert to the given timezone passed as a string or tzinfo.
if isinstance(tz, string_types):
tz = pytz.timezone(tz)
dt = dt.astimezone(tz)
return dt # datetime.datetime objects are immutable.
def set_transact(self, transact):
"""
Set the method that will be called to create a
transaction from open orders and trade events.
"""
self.blotter.transact = transact
def update_dividends(self, dividend_frame):
"""
Set DataFrame used to process dividends. DataFrame columns should
contain at least the entries in zp.DIVIDEND_FIELDS.
"""
self.perf_tracker.update_dividends(dividend_frame)
@api_method
def set_slippage(self, slippage):
if not isinstance(slippage, SlippageModel):
raise UnsupportedSlippageModel()
if self.initialized:
raise OverrideSlippagePostInit()
self.slippage = slippage
@api_method
def set_commission(self, commission):
if not isinstance(commission, (PerShare, PerTrade, PerDollar)):
raise UnsupportedCommissionModel()
if self.initialized:
raise OverrideCommissionPostInit()
self.commission = commission
@api_method
def set_symbol_lookup_date(self, dt):
"""
Set the date for which symbols will be resolved to their sids
(symbols may map to different firms or underlying assets at
different times)
"""
try:
self._symbol_lookup_date = pd.Timestamp(dt, tz='UTC')
except ValueError:
raise UnsupportedDatetimeFormat(input=dt,
method='set_symbol_lookup_date')
def set_sources(self, sources):
assert isinstance(sources, list)
self.sources = sources
    # Retained for backwards compatibility.
@property
def data_frequency(self):
return self.sim_params.data_frequency
@data_frequency.setter
def data_frequency(self, value):
assert value in ('daily', 'minute')
self.sim_params.data_frequency = value
@api_method
def order_percent(self, sid, percent,
limit_price=None, stop_price=None, style=None):
"""
Place an order in the specified asset corresponding to the given
percent of the current portfolio value.
        Note that percent must be expressed as a decimal (0.50 means 50%).
"""
value = self.portfolio.portfolio_value * percent
return self.order_value(sid, value,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def order_target(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
"""
if sid in self.portfolio.positions:
current_position = self.portfolio.positions[sid].amount
req_shares = target - current_position
return self.order(sid, req_shares,
limit_price=limit_price,
stop_price=stop_price,
style=style)
else:
return self.order(sid, target,
limit_price=limit_price,
stop_price=stop_price,
style=style)
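    # Worked example with an assumed position: if the current position in sid
    # is 100 shares, order_target(sid, 160) orders +60 shares and
    # order_target(sid, 40) orders -60; with no existing position the full
    # target amount is ordered.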
@api_method
def order_target_value(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
"""
target_amount = self._calculate_order_value_amount(sid, target)
return self.order_target(sid, target_amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def order_target_percent(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target percent of the
current portfolio value. If the position doesn't already exist, this is
equivalent to placing a new order. If the position does exist, this is
equivalent to placing an order for the difference between the target
percent and the current percent.
        Note that target must be expressed as a decimal (0.50 means 50%).
"""
target_value = self.portfolio.portfolio_value * target
return self.order_target_value(sid, target_value,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def get_open_orders(self, sid=None):
if sid is None:
return {
key: [order.to_api_obj() for order in orders]
for key, orders in iteritems(self.blotter.open_orders)
if orders
}
if sid in self.blotter.open_orders:
orders = self.blotter.open_orders[sid]
return [order.to_api_obj() for order in orders]
return []
@api_method
def get_order(self, order_id):
if order_id in self.blotter.orders:
return self.blotter.orders[order_id].to_api_obj()
@api_method
def cancel_order(self, order_param):
order_id = order_param
if isinstance(order_param, zipline.protocol.Order):
order_id = order_param.id
self.blotter.cancel(order_id)
@api_method
def add_history(self, bar_count, frequency, field, ffill=True):
data_frequency = self.sim_params.data_frequency
history_spec = HistorySpec(bar_count, frequency, field, ffill,
data_frequency=data_frequency)
self.history_specs[history_spec.key_str] = history_spec
if self.initialized:
if self.history_container:
self.history_container.ensure_spec(
history_spec, self.datetime, self._most_recent_data,
)
else:
self.history_container = self.history_container_class(
self.history_specs,
self.current_universe(),
self.sim_params.first_open,
self.sim_params.data_frequency,
)
def get_history_spec(self, bar_count, frequency, field, ffill):
spec_key = HistorySpec.spec_key(bar_count, frequency, field, ffill)
if spec_key not in self.history_specs:
data_freq = self.sim_params.data_frequency
spec = HistorySpec(
bar_count,
frequency,
field,
ffill,
data_frequency=data_freq,
)
self.history_specs[spec_key] = spec
if not self.history_container:
self.history_container = self.history_container_class(
self.history_specs,
self.current_universe(),
self.datetime,
self.sim_params.data_frequency,
bar_data=self._most_recent_data,
)
self.history_container.ensure_spec(
spec, self.datetime, self._most_recent_data,
)
return self.history_specs[spec_key]
@api_method
def history(self, bar_count, frequency, field, ffill=True):
history_spec = self.get_history_spec(
bar_count,
frequency,
field,
ffill,
)
return self.history_container.get_history(history_spec, self.datetime)
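    # Hypothetical call from handle_data(): a trailing 10-bar daily price
    # window for the current universe, forward-filled, returned as a DataFrame
    # indexed by date with one column per sid:
    #
    #   prices = self.history(10, '1d', 'price', ffill=True)
    #   sma_10 = prices.mean()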
####################
# Account Controls #
####################
def register_account_control(self, control):
"""
Register a new AccountControl to be checked on each bar.
"""
if self.initialized:
raise RegisterAccountControlPostInit()
self.account_controls.append(control)
def validate_account_controls(self):
for control in self.account_controls:
control.validate(self.updated_portfolio(),
self.updated_account(),
self.get_datetime(),
self.trading_client.current_data)
@api_method
def set_max_leverage(self, max_leverage=None):
"""
Set a limit on the maximum leverage of the algorithm.
"""
control = MaxLeverage(max_leverage)
self.register_account_control(control)
####################
# Trading Controls #
####################
def register_trading_control(self, control):
"""
Register a new TradingControl to be checked prior to order calls.
"""
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control)
@api_method
def set_max_position_size(self,
sid=None,
max_shares=None,
max_notional=None):
"""
Set a limit on the number of shares and/or dollar value held for the
given sid. Limits are treated as absolute values and are enforced at
the time that the algo attempts to place an order for sid. This means
that it's possible to end up with more than the max number of shares
due to splits/dividends, and more than the max notional due to price
improvement.
        If an algorithm attempts to place an order that would increase the
        absolute value of shares/dollar value held beyond one of these limits,
        a TradingControlException is raised.
"""
control = MaxPositionSize(asset=sid,
max_shares=max_shares,
max_notional=max_notional)
self.register_trading_control(control)
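    # Editor's note: a hedged usage sketch (not in the original source). Inside
    # an algorithm's initialize(), the control above might be registered like
    # this; the sid and numbers are purely illustrative.
    #
    #   set_max_position_size(sid=some_sid, max_shares=1000,
    #                         max_notional=100000.0)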
@api_method
def set_max_order_size(self, sid=None, max_shares=None, max_notional=None):
"""
Set a limit on the number of shares and/or dollar value of any single
order placed for sid. Limits are treated as absolute values and are
enforced at the time that the algo attempts to place an order for sid.
        If an algorithm attempts to place an order that would exceed one of
        these limits, a TradingControlException is raised.
"""
control = MaxOrderSize(asset=sid,
max_shares=max_shares,
max_notional=max_notional)
self.register_trading_control(control)
@api_method
def set_max_order_count(self, max_count):
"""
Set a limit on the number of orders that can be placed within the given
time interval.
"""
control = MaxOrderCount(max_count)
self.register_trading_control(control)
@api_method
def set_do_not_order_list(self, restricted_list):
"""
Set a restriction on which sids can be ordered.
"""
control = RestrictedListOrder(restricted_list)
self.register_trading_control(control)
@api_method
def set_long_only(self):
"""
Set a rule specifying that this algorithm cannot take short positions.
"""
self.register_trading_control(LongOnly())
###########
# FFC API #
###########
@api_method
@require_not_initialized(AddTermPostInit())
def add_factor(self, factor, name):
if name in self._factors:
raise ValueError("Name %r is already a factor!" % name)
self._factors[name] = factor
@api_method
@require_not_initialized(AddTermPostInit())
def add_filter(self, filter):
name = "anon_filter_%d" % len(self._filters)
self._filters[name] = filter
# Note: add_classifier is not yet implemented since you can't do anything
# useful with classifiers yet.
def _all_terms(self):
# Merge all three dicts.
return dict(
chain.from_iterable(
iteritems(terms)
for terms in (self._filters, self._factors, self._classifiers)
)
)
def compute_factor_matrix(self, start_date):
"""
Compute a factor matrix starting at start_date.
"""
days = self.trading_environment.trading_days
start_date_loc = days.get_loc(start_date)
sim_end = self.sim_params.last_close.normalize()
end_loc = min(start_date_loc + 252, days.get_loc(sim_end))
end_date = days[end_loc]
return self.engine.factor_matrix(
self._all_terms(),
start_date,
end_date,
), end_date
def current_universe(self):
return self._current_universe
@classmethod
def all_api_methods(cls):
"""
Return a list of all the TradingAlgorithm API methods.
"""
return [
fn for fn in itervalues(vars(cls))
if getattr(fn, 'is_api_method', False)
]
| apache-2.0 |
uhjish/seaborn | seaborn/timeseries.py | 13 | 15218 | """Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function is intended to be used with data where observations are
nested within sampling units that were measured at multiple timepoints.
It can take data specified either as a long-form (tidy) DataFrame or as an
    ndarray with dimensions (unit, time). The interpretation of some of the
other parameters changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters. When
using a DataFrame, the index has to be sequential.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
{ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100]
        Confidence interval size(s). If a list, it will stack the error
plots for each confidence interval. Only relevant for error styles
with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
        Function to determine central tendency and to pass to bootstrap;
        must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
err_palette : seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
Keyword argument dictionary passed through to matplotlib function
        generating the error plot.
legend : bool, optional
If ``True`` and there is a ``condition`` variable, add a legend to
the plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
Examples
--------
Plot a trace with translucent confidence bands:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(22)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> x = np.linspace(0, 15, 31)
>>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1)
>>> ax = sns.tsplot(data=data)
Plot a long-form dataframe with several conditions:
.. plot::
:context: close-figs
>>> gammas = sns.load_dataset("gammas")
>>> ax = sns.tsplot(time="timepoint", value="BOLD signal",
... unit="subject", condition="ROI",
... data=gammas)
Use error bars at the positions of the observations:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g")
Don't interpolate between the observations:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False)
Show multiple confidence bands:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, ci=[68, 95], color="m")
Use a different estimator:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, estimator=np.median)
Show each bootstrap resample:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500)
Show the trace from each sampling unit:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="unit_traces")
"""
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(np.ones(len(data)))
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
# Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
current_palette = mpl.rcParams["axes.color_cycle"]
if len(current_palette) < n_cond:
colors = color_palette("husl", n_cond)
else:
colors = color_palette(n_colors=n_cond)
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
x = df_c.columns.values.astype(np.float)
# Bootstrap the data for confidence intervals
boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [utils.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
kwargs.setdefault("marker", "" if interpolate else "o")
ls = kwargs.pop("ls", "-" if interpolate else "")
kwargs.setdefault("linestyle", ls)
label = cond if legend else "_nolegend_"
ax.plot(x, central_data, color=color, label=label, **kwargs)
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
    """Plot translucent error bands around the central tendency."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
    """Plot the kernel density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
    """Plot the kernel density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
| bsd-3-clause |
bhilburn/gnuradio | gr-fec/python/fec/polar/channel_construction.py | 17 | 4537 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
[0] Erdal Arikan: 'Channel Polarization: A Method for Constructing Capacity-Achieving Codes for Symmetric Binary-Input Memoryless Channels', 2009
foundational paper for polar codes.
'''
from channel_construction_bec import calculate_bec_channel_capacities
from channel_construction_bec import design_snr_to_bec_eta
from channel_construction_bec import bhattacharyya_bounds
from channel_construction_awgn import tal_vardy_tpm_algorithm
from helper_functions import *
Z_PARAM_FIRST_HEADER_LINE = "Bhattacharyya parameters (Z-parameters) for a polar code"
def get_frozen_bit_indices_from_capacities(chan_caps, nfrozen):
indexes = np.array([], dtype=int)
while indexes.size < nfrozen:
index = np.argmin(chan_caps)
indexes = np.append(indexes, index)
chan_caps[index] = 2.0 # make absolutely sure value is out of range!
return np.sort(indexes)
def get_frozen_bit_indices_from_z_parameters(z_params, nfrozen):
indexes = np.array([], dtype=int)
while indexes.size < nfrozen:
index = np.argmax(z_params)
indexes = np.append(indexes, index)
z_params[index] = -1.0
return np.sort(indexes)
def get_bec_frozen_indices(nblock, kfrozen, eta):
bec_caps = calculate_bec_channel_capacities(eta, nblock)
positions = get_frozen_bit_indices_from_capacities(bec_caps, kfrozen)
return positions
def get_frozen_bit_mask(frozen_indices, block_size):
frozen_mask = np.zeros(block_size, dtype=int)
frozen_mask[frozen_indices] = 1
return frozen_mask
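# Editor's note: a small illustrative example for get_frozen_bit_mask (not part
# of the original file):
#
#   >>> get_frozen_bit_mask(np.array([0, 1, 2, 4]), 8)
#   array([1, 1, 1, 0, 1, 0, 0, 0])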
def frozen_bit_positions(block_size, info_size, design_snr=0.0):
if not design_snr > -1.5917:
        print('bad value for design_snr, must be > -1.5917! default=0.0')
design_snr = 0.0
eta = design_snr_to_bec_eta(design_snr)
return get_bec_frozen_indices(block_size, block_size - info_size, eta)
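# Editor's note (illustrative, not part of the original file): for the classic
# N=8, K=4 example from Arikan's paper, the least reliable BEC channels are
# indices 0, 1, 2 and 4, so one would expect
#
#   >>> frozen_bit_positions(block_size=8, info_size=4, design_snr=0.0)
#   array([0, 1, 2, 4])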
def generate_filename(block_size, design_snr, mu):
filename = "polar_code_z_parameters_N" + str(int(block_size))
filename += "_SNR" + str(float(design_snr)) + "_MU" + str(int(mu)) + ".polar"
return filename
def default_dir():
dir_def = "~/.gnuradio/polar/"
import os
path = os.path.expanduser(dir_def)
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
return path
def save_z_parameters(z_params, block_size, design_snr, mu, alt_construction_method='Tal-Vardy algorithm'):
path = default_dir()
filename = generate_filename(block_size, design_snr, mu)
header = Z_PARAM_FIRST_HEADER_LINE + "\n"
header += "Channel construction method: " + alt_construction_method + "\n"
header += "Parameters:\n"
header += "block_size=" + str(block_size) + "\n"
header += "design_snr=" + str(design_snr) + "\n"
header += "mu=" + str(mu)
np.savetxt(path + filename, z_params, header=header)
def load_z_parameters(block_size, design_snr, mu):
path = default_dir()
filename = generate_filename(block_size, design_snr, mu)
full_file = path + filename
import os
if not os.path.isfile(full_file):
z_params = tal_vardy_tpm_algorithm(block_size, design_snr, mu)
save_z_parameters(z_params, block_size, design_snr, mu)
z_params = np.loadtxt(full_file)
return z_params
def main():
np.set_printoptions(precision=3, linewidth=150)
print 'channel construction Bhattacharyya bounds by Arikan'
n = 10
m = 2 ** n
k = m // 2
design_snr = 0.0
mu = 32
z_params = load_z_parameters(m, design_snr, mu)
z_bounds = bhattacharyya_bounds(design_snr, m)
print(z_params[-10:])
if 0:
import matplotlib.pyplot as plt
plt.plot(z_params)
plt.plot(z_bounds)
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
CGATOxford/UMI-tools | umi_tools/whitelist_methods.py | 1 | 28208 | '''
whitelist_methods.py - Methods for whitelisting cell barcodes
=============================================================
'''
import itertools
import collections
import matplotlib
import copy
import regex
# required to run on systems with no X11
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np
import numpy.matlib as npm
from scipy.stats import gaussian_kde
from scipy.signal import argrelextrema
import umi_tools.Utilities as U
from umi_tools._dedup_umi import edit_distance
import pybktree
def getKneeEstimateDensity(cell_barcode_counts,
expect_cells=False,
cell_number=False,
plotfile_prefix=None):
''' estimate the number of "true" cell barcodes using a gaussian
density-based method
input:
cell_barcode_counts = dict(key = barcode, value = count)
expect_cells (optional) = define the expected number of cells
cell_number (optional) = define number of cell barcodes to accept
plotfile_prefix = (optional) prefix for plots
returns:
List of true barcodes
'''
# very low abundance cell barcodes are filtered out (< 0.001 *
# the most abundant)
threshold = 0.001 * cell_barcode_counts.most_common(1)[0][1]
counts = sorted(cell_barcode_counts.values(), reverse=True)
counts_thresh = [x for x in counts if x > threshold]
log_counts = np.log10(counts_thresh)
    # Gaussian density with hardcoded bandwidth
density = gaussian_kde(log_counts, bw_method=0.1)
xx_values = 10000 # how many x values for density plot
xx = np.linspace(log_counts.min(), log_counts.max(), xx_values)
local_min = None
if cell_number: # we have a prior hard expectation on the number of cells
threshold = counts[cell_number]
else:
local_mins = argrelextrema(density(xx), np.less)[0]
local_mins_counts = []
for poss_local_min in local_mins[::-1]:
passing_threshold = sum([y > np.power(10, xx[poss_local_min])
for x, y in cell_barcode_counts.items()])
local_mins_counts.append(passing_threshold)
            if not local_min:  # if we have not yet selected a local min
if expect_cells: # we have a "soft" expectation
if (passing_threshold > expect_cells * 0.1 and
passing_threshold <= expect_cells):
local_min = poss_local_min
else: # we have no prior expectation
                    # TS: In the absence of any expectation (either hard or soft),
                    # this set of heuristic thresholds is used to decide
# which local minimum to select.
# This is very unlikely to be the best way to achieve this!
if (poss_local_min >= 0.2 * xx_values and
(log_counts.max() - xx[poss_local_min] > 0.5 or
xx[poss_local_min] < log_counts.max()/2)):
local_min = poss_local_min
if local_min is not None:
threshold = np.power(10, xx[local_min])
if cell_number or local_min is not None:
final_barcodes = set([
x for x, y in cell_barcode_counts.items() if y > threshold])
else:
final_barcodes = None
if plotfile_prefix:
# colour-blind friendly colours - https://gist.github.com/thriveth/8560036
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
user_line = mlines.Line2D(
[], [], color=CB_color_cycle[0], ls="dashed",
markersize=15, label='User-defined')
selected_line = mlines.Line2D(
[], [], color=CB_color_cycle[0], ls="dashed", markersize=15, label='Selected')
rejected_line = mlines.Line2D(
[], [], color=CB_color_cycle[3], ls="dashed", markersize=15, label='Rejected')
# make density plot
fig = plt.figure()
fig1 = fig.add_subplot(111)
fig1.plot(xx, density(xx), 'k')
fig1.set_xlabel("Count per cell (log10)")
fig1.set_ylabel("Density")
if cell_number:
fig1.axvline(np.log10(threshold), ls="dashed", color=CB_color_cycle[0])
lgd = fig1.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[user_line],
title="Cell threshold")
elif local_min is None: # no local_min was accepted
for pos in xx[local_mins]:
fig1.axvline(x=pos, ls="dashed", color=CB_color_cycle[3])
lgd = fig1.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[selected_line, rejected_line],
title="Possible thresholds")
else:
for pos in xx[local_mins]:
if pos == xx[local_min]: # selected local minima
fig1.axvline(x=xx[local_min], ls="dashed", color=CB_color_cycle[0])
else:
fig1.axvline(x=pos, ls="dashed", color=CB_color_cycle[3])
lgd = fig1.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[selected_line, rejected_line],
title="Possible thresholds")
fig.savefig("%s_cell_barcode_count_density.png" % plotfile_prefix,
bbox_extra_artists=(lgd,), bbox_inches='tight')
# make knee plot
fig = plt.figure()
fig2 = fig.add_subplot(111)
fig2.plot(range(0, len(counts)), np.cumsum(counts), c="black")
xmax = len(counts)
if local_min is not None:
# reasonable maximum x-axis value
xmax = min(len(final_barcodes) * 5, xmax)
fig2.set_xlim((0 - (0.01 * xmax), xmax))
fig2.set_xlabel("Rank")
fig2.set_ylabel("Cumulative count")
if cell_number:
fig2.axvline(x=cell_number, ls="dashed", color=CB_color_cycle[0])
lgd = fig2.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[user_line],
title="Cell threshold")
elif local_min is None: # no local_min was accepted
for local_mins_count in local_mins_counts:
fig2.axvline(x=local_mins_count, ls="dashed",
color=CB_color_cycle[3])
lgd = fig2.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[selected_line, rejected_line],
title="Possible thresholds")
else:
for local_mins_count in local_mins_counts:
if local_mins_count == len(final_barcodes): # selected local minima
fig2.axvline(x=local_mins_count, ls="dashed",
color=CB_color_cycle[0])
else:
fig2.axvline(x=local_mins_count, ls="dashed",
color=CB_color_cycle[3])
lgd = fig2.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[selected_line, rejected_line],
title="Possible thresholds")
fig.savefig("%s_cell_barcode_knee.png" % plotfile_prefix,
bbox_extra_artists=(lgd,), bbox_inches='tight')
if local_min is not None:
colours_selected = [CB_color_cycle[0] for x in range(0, len(final_barcodes))]
colours_rejected = ["black" for x in range(0, len(counts)-len(final_barcodes))]
colours = colours_selected + colours_rejected
else:
colours = ["black" for x in range(0, len(counts))]
fig = plt.figure()
fig3 = fig.add_subplot(111)
fig3.scatter(x=range(1, len(counts)+1), y=counts,
c=colours, s=10, linewidths=0)
fig3.loglog()
fig3.set_xlim(0, len(counts)*1.25)
fig3.set_xlabel('Barcode index')
fig3.set_ylabel('Count')
if cell_number:
fig3.axvline(x=cell_number, ls="dashed", color=CB_color_cycle[0])
lgd = fig3.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[user_line],
title="Cell threshold")
elif local_min is None: # no local_min was accepted
for local_mins_count in local_mins_counts:
fig3.axvline(x=local_mins_count, ls="dashed",
color=CB_color_cycle[3])
lgd = fig3.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[selected_line, rejected_line],
title="Possible thresholds")
else:
for local_mins_count in local_mins_counts:
if local_mins_count == len(final_barcodes): # selected local minima
fig3.axvline(x=local_mins_count, ls="dashed",
color=CB_color_cycle[0])
else:
fig3.axvline(x=local_mins_count, ls="dashed",
color=CB_color_cycle[3])
lgd = fig3.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[selected_line, rejected_line],
title="Possible thresholds")
fig.savefig("%s_cell_barcode_counts.png" % plotfile_prefix,
bbox_extra_artists=(lgd,), bbox_inches='tight')
if not cell_number:
with U.openFile("%s_cell_thresholds.tsv" % plotfile_prefix, "w") as outf:
outf.write("count\taction\n")
for local_mins_count in local_mins_counts:
if local_min and local_mins_count == len(final_barcodes):
threshold_type = "Selected"
else:
threshold_type = "Rejected"
outf.write("%s\t%s\n" % (local_mins_count, threshold_type))
return final_barcodes
def getKneeEstimateDistance(cell_barcode_counts,
cell_number=False,
plotfile_prefix=None):
''' estimate the number of "true" cell barcodes via a knee method
which finds the point with maximum distance
input:
cell_barcode_counts = dict(key = barcode, value = count)
cell_number (optional) = define number of cell barcodes to accept
plotfile_prefix = (optional) prefix for plots
returns:
List of true barcodes
'''
def getKneeDistance(values):
'''
This function is based on
https://stackoverflow.com/questions/2018178/finding-the-best-trade-off-point-on-a-curve
and https://dataplatform.cloud.ibm.com/analytics/notebooks/54d79c2a-f155-40ec-93ec-ed05b58afa39/view?access_token=6d8ec910cf2a1b3901c721fcb94638563cd646fe14400fecbb76cea6aaae2fb1
The idea is to draw a line from the first to last point on the
cumulative counts curve and then find the point on the curve
which is the maximum distance away from this line
'''
# get coordinates of all the points
nPoints = len(values)
allCoord = np.vstack((range(nPoints), values)).T
# get the first point
firstPoint = allCoord[0]
# get vector between first and last point - this is the line
lineVec = allCoord[-1] - allCoord[0]
lineVecNorm = lineVec / np.sqrt(np.sum(lineVec**2))
# find the distance from each point to the line:
# vector between all points and first point
vecFromFirst = allCoord - firstPoint
# To calculate the distance to the line, we split vecFromFirst into two
# components, one that is parallel to the line and one that is perpendicular
# Then, we take the norm of the part that is perpendicular to the line and
# get the distance.
# We find the vector parallel to the line by projecting vecFromFirst onto
# the line. The perpendicular vector is vecFromFirst - vecFromFirstParallel
# We project vecFromFirst by taking the scalar product of the vector with
# the unit vector that points in the direction of the line (this gives us
# the length of the projection of vecFromFirst onto the line). If we
# multiply the scalar product by the unit vector, we have vecFromFirstParallel
scalarProduct = np.sum(
vecFromFirst * npm.repmat(lineVecNorm, nPoints, 1), axis=1)
vecFromFirstParallel = np.outer(scalarProduct, lineVecNorm)
vecToLine = vecFromFirst - vecFromFirstParallel
# distance to line is the norm of vecToLine
distToLine = np.sqrt(np.sum(vecToLine ** 2, axis=1))
# knee/elbow is the point with max distance value
idxOfBestPoint = np.argmax(distToLine)
return(distToLine, idxOfBestPoint)
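    # Editor's note: a small worked example of the geometry above (not part of
    # the original source). For values [0, 8, 9, 10] the reference line runs
    # from (0, 0) to (3, 10); the point (1, 8) has the largest perpendicular
    # distance (~1.34), so getKneeDistance returns idxOfBestPoint == 1.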
counts = [x[1] for x in cell_barcode_counts.most_common()]
values = list(np.cumsum(counts))
# We need to perform the distance knee iteratively with reduced
# number of CBs since it's sensitive to the number of CBs input
# and overestimates if too many CBs are used
previous_idxOfBestPoint = 0
distToLine, idxOfBestPoint = getKneeDistance(values)
if idxOfBestPoint == 0:
raise ValueError("Something's gone wrong here!!")
max_iterations = 100
iterations = 0
while idxOfBestPoint - previous_idxOfBestPoint != 0:
previous_idxOfBestPoint = idxOfBestPoint
iterations += 1
if iterations > max_iterations:
break
distToLine, idxOfBestPoint = getKneeDistance(values[:idxOfBestPoint*3])
knee_final_barcodes = [x[0] for x in cell_barcode_counts.most_common()[
:idxOfBestPoint+1]]
if cell_number:
threshold = counts[cell_number]
final_barcodes = set([
x for x, y in cell_barcode_counts.items() if y > threshold])
else:
final_barcodes = knee_final_barcodes
if plotfile_prefix:
# colour-blind friendly colours - https://gist.github.com/thriveth/8560036
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
user_line = mlines.Line2D(
[], [], color=CB_color_cycle[2], ls="dashed",
markersize=15, label='User-defined')
selected_line = mlines.Line2D(
[], [], color=CB_color_cycle[0], ls="dashed", markersize=15, label='Knee')
# plot of the original curve and its corresponding distances
plt.figure(figsize=(12, 6))
plt.plot(distToLine, label='Distance', color='r')
plt.plot(values, label='Cumulative', color='b')
plt.plot([idxOfBestPoint], values[idxOfBestPoint], marker='o',
markersize=8, color="red", label='Knee')
if cell_number:
plt.axvline(x=cell_number, ls="dashed",
color=CB_color_cycle[2], label="User-defined")
plt.legend()
plt.savefig("%s_cell_barcode_knee.png" % plotfile_prefix)
colours_selected = [CB_color_cycle[0] for x in range(0, len(final_barcodes))]
colours_rejected = ["black" for x in range(0, len(counts)-len(final_barcodes))]
colours = colours_selected + colours_rejected
fig = plt.figure()
fig3 = fig.add_subplot(111)
fig3.scatter(x=range(1, len(counts)+1), y=counts,
c=colours, s=10, linewidths=0)
fig3.loglog()
fig3.set_xlim(0, len(counts)*1.25)
fig3.set_xlabel('Barcode index')
fig3.set_ylabel('Count')
fig3.axvline(x=len(knee_final_barcodes), ls="dashed", color=CB_color_cycle[0])
if cell_number:
fig3.axvline(x=cell_number, ls="dashed", color=CB_color_cycle[2])
lgd = fig3.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[selected_line, user_line],
title="User threshold")
else:
lgd = fig3.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
handles=[selected_line],
title="Knee threshold")
fig.savefig("%s_cell_barcode_counts.png" % plotfile_prefix,
bbox_extra_artists=(lgd,), bbox_inches='tight')
if not cell_number:
with U.openFile("%s_cell_thresholds.tsv" % plotfile_prefix, "w") as outf:
outf.write("count\n")
outf.write("%s\n" % idxOfBestPoint)
return(final_barcodes)
# Function contributed by https://github.com/redst4r
def getErrorCorrectMapping(cell_barcodes, whitelist, threshold=1):
''' Find the mappings between true and false cell barcodes based
on an edit distance threshold.
Any cell barcode within the threshold to more than one whitelist
barcode will be excluded'''
true_to_false = collections.defaultdict(set)
    # Unexpected results with the cythonised hamming distance, so redefine it in Python here
def hamming_distance(first, second):
''' returns the edit distance/hamming distances between
its two arguements '''
# We only want to define hamming distance for barcodes with the same length
if len(first) != len(second):
return np.inf
dist = sum([not a == b for a, b in zip(first, second)])
return dist
whitelist = set([str(x) for x in whitelist])
U.info('building bktree')
tree2 = pybktree.BKTree(hamming_distance, whitelist)
U.info('done building bktree')
for cell_barcode in cell_barcodes:
if cell_barcode in whitelist:
# if the barcode is already whitelisted, no need to add
continue
# get all members of whitelist that are at distance 1
candidates = [white_cell for
d, white_cell in
tree2.find(cell_barcode, threshold) if
d > 0]
if len(candidates) == 0:
            # the cell doesn't match any whitelisted barcode,
            # hence we have to drop it
            # (as it cannot be associated with any frequent barcode)
continue
elif len(candidates) == 1:
white_cell_str = candidates[0]
true_to_false[white_cell_str].add(cell_barcode)
else:
            # more than one whitelisted candidate:
            # we drop it as it's not uniquely assignable
continue
return true_to_false
def getCellWhitelist(cell_barcode_counts,
knee_method="distance",
expect_cells=False,
cell_number=False,
error_correct_threshold=0,
plotfile_prefix=None):
if knee_method == "distance":
cell_whitelist = getKneeEstimateDistance(
cell_barcode_counts, cell_number, plotfile_prefix)
elif knee_method == "density":
cell_whitelist = getKneeEstimateDensity(
cell_barcode_counts, expect_cells, cell_number, plotfile_prefix)
else:
raise ValueError("knee_method must be 'distance' or 'density'")
U.info("Finished - whitelist determination")
true_to_false_map = None
if cell_whitelist and error_correct_threshold > 0:
U.info("Starting - finding putative error cell barcodes")
true_to_false_map = getErrorCorrectMapping(
cell_barcode_counts.keys(), cell_whitelist,
error_correct_threshold)
U.info("Finished - finding putative error cell barcodes")
return cell_whitelist, true_to_false_map
def getUserDefinedBarcodes(whitelist_tsv, whitelist_tsv2=None,
getErrorCorrection=False,
deriveErrorCorrection=False,
threshold=1):
'''
whitelist_tsv: tab-separated file with whitelisted barcodes. First
field should be whitelist barcodes. Second field [optional] should
be comma-separated barcodes which are to be corrected to the
barcode in the first field.
whitelist_tsv2: as above but for read2s
getErrorCorrection: extract the second field in whitelist_tsv and
return a map of non-whitelist:whitelist
deriveErrorCorrection: return a map of non-whitelist:whitelist
using a simple edit distance threshold
'''
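    # Editor's note: an illustrative (hypothetical) whitelist_tsv layout, as
    # described above -- the first field is the whitelisted barcode, and the
    # optional second field holds comma-separated barcodes to correct to it:
    #
    #   AAACCTGAGAAACCAT    AAACCTGAGAAACCAA,AAACCTGAGAAACCAC
    #   AAACCTGAGAAACCGG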
base2errors = {"A": ["T", "C", "G", "N"],
"T": ["A", "C", "G", "N"],
"C": ["T", "A", "G", "N"],
"G": ["T", "C", "A", "N"]}
whitelist = []
if getErrorCorrection or deriveErrorCorrection:
false_to_true_map = {}
else:
false_to_true_map = None
def singleBarcodeGenerator(whitelist_tsv):
with U.openFile(whitelist_tsv, "r") as inf:
for line in inf:
if line.startswith('#'):
continue
line = line.strip().split("\t")
yield(line[0])
def pairedBarcodeGenerator(whitelist_tsv, whitelist_tsv2):
whitelist1 = []
whitelist2 = []
with U.openFile(whitelist_tsv, "r") as inf:
for line in inf:
if line.startswith('#'):
continue
line = line.strip().split("\t")
whitelist1.append(line[0])
with U.openFile(whitelist_tsv2, "r") as inf2:
for line in inf2:
if line.startswith('#'):
continue
line = line.strip().split("\t")
whitelist2.append(line[0])
for w1, w2 in itertools.product(whitelist1, whitelist2):
yield(w1 + w2)
if deriveErrorCorrection:
if whitelist_tsv2:
whitelist_barcodes = pairedBarcodeGenerator(whitelist_tsv, whitelist_tsv2)
else:
whitelist_barcodes = singleBarcodeGenerator(whitelist_tsv)
for whitelist_barcode in whitelist_barcodes:
whitelist.append(whitelist_barcode)
# for every possible combination of positions for error(s)
for positions in itertools.product(
range(0, len(whitelist_barcode)), repeat=threshold):
m_bases = [base2errors[whitelist_barcode[x]] for x in positions]
# for every possible combination of errors
for m in itertools.product(*m_bases):
error_barcode = list(whitelist_barcode)
# add errors
for pos, error_base in zip(positions, m):
error_barcode[pos] = error_base
error_barcode = "".join(error_barcode)
# if error barcode has already been seen, must be within
# threshold edit distance of >1 whitelisted barcodes
if error_barcode in false_to_true_map:
# don't report multiple times for the same barcode
if false_to_true_map[error_barcode]:
U.info("Error barcode %s can be assigned to more than "
"one possible true barcode: %s or %s" % (
error_barcode,
false_to_true_map[error_barcode],
whitelist_barcode))
false_to_true_map[error_barcode] = None
else:
false_to_true_map[error_barcode] = whitelist_barcode
elif getErrorCorrection:
assert not whitelist_tsv2, ("Can only extract errors from the whitelist "
"if a single whitelist is given")
with U.openFile(whitelist_tsv, "r") as inf:
for line in inf:
if line.startswith('#'):
continue
line = line.strip().split("\t")
whitelist_barcode = line[0]
whitelist.append(whitelist_barcode)
if getErrorCorrection:
for error_barcode in line[1].split(","):
false_to_true_map[error_barcode] = whitelist_barcode
else: # no error correction
if whitelist_tsv2:
whitelist_barcodes = pairedBarcodeGenerator(whitelist_tsv, whitelist_tsv2)
else:
whitelist_barcodes = singleBarcodeGenerator(whitelist_tsv)
whitelist = [x for x in whitelist_barcodes]
return set(whitelist), false_to_true_map
def checkError(barcode, whitelist, errors=1):
'''
Check for errors (substitutions, insertions, deletions) between a barcode
and a set of whitelist barcodes.
Returns the whitelist barcodes which match the input barcode
allowing for errors. Returns as soon as two are identified.
'''
near_matches = []
comp_regex = regex.compile("(%s){e<=%i}" % (barcode, errors))
b_length = len(barcode)
for whitelisted_barcode in whitelist:
w_length = len(whitelisted_barcode)
# Don't check against itself
if barcode == whitelisted_barcode:
continue
# If difference in barcode lengths > number of allowed errors, continue
if (max(b_length, w_length) > (min(b_length, w_length) + errors)):
continue
if comp_regex.match(whitelisted_barcode):
near_matches.append(whitelisted_barcode)
            # Downstream handling is the same for any count > 1, so it is
            # OK to stop searching as soon as two near matches are found
if len(near_matches) > 1:
return near_matches
return near_matches
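# Editor's note: a quick illustrative call of checkError (not part of the
# original file); the barcodes are made up.
#
#   >>> checkError("AAAT", ["AAAA", "CCCC"], errors=1)
#   ['AAAA']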
def errorDetectAboveThreshold(cell_barcode_counts,
cell_whitelist,
true_to_false_map,
errors=1,
resolution_method="discard"):
assert resolution_method in ["discard", "correct"], (
"resolution method must be discard or correct")
error_counter = collections.Counter()
new_true_to_false_map = copy.deepcopy(true_to_false_map)
discard_cbs = set()
cell_whitelist = list(cell_whitelist)
cell_whitelist.sort(key=lambda x: cell_barcode_counts[x])
for ix, cb in enumerate(cell_whitelist):
near_misses = checkError(cb, cell_whitelist[ix+1:], errors=errors)
        if len(near_misses) > 0:
            if len(near_misses) > 1:
                # count CBs within the error threshold of more than one other CB
                error_counter["error_discarded_mt_1"] += 1
            discard_cbs.add(cb)  # Will always discard CB from cell_whitelist
if resolution_method == "correct" and len(near_misses) == 1:
# Only correct substitutions as INDELs will also mess
# up UMI so simple correction of CB is insufficient
if regex.match("(%s){s<=%i}" % (cb, errors), near_misses[0]):
# add corrected barcode to T:F map
new_true_to_false_map[near_misses[0]].add(cb)
error_counter["substitution_corrected"] += 1
else:
discard_cbs.add(cb)
error_counter["indel_discarded"] += 1
else:
error_counter["error_discarded"] += 1
if resolution_method == "correct":
U.info("CBs above the knee corrected due to possible substitutions: %i" %
error_counter["substitution_corrected"])
U.info("CBs above the knee discarded due to possible INDELs: %i" %
error_counter["indel_discarded"])
U.info("CBs above the knee discarded due to possible errors from "
"multiple other CBs: %i" % error_counter["error_discarded_mt_1"])
else:
U.info("CBs above the knee discarded due to possible errors: %i" %
len(discard_cbs))
cell_whitelist = set(cell_whitelist).difference(discard_cbs)
return(cell_whitelist, new_true_to_false_map)
| mit |
hsaputra/tensorflow | tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 12 | 13093 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
4. train a model using an iterative gradient-based method; and
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
from tensorflow.python.eager import context
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
def parse(line):
"""Parse a line from the colors dataset."""
# Each line of the dataset is comma-separated and formatted as
# color_name, r, g, b
# so `items` is a list [color_name, r, g, b].
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
# Represent the color name as a one-hot encoded character sequence.
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
# The sequence length is needed by our RNN.
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
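# Editor's note (illustrative, not part of the original file): conceptually,
# a CSV line such as "white,255,255,255" parses to rgb ~ [1.0, 1.0, 1.0],
# a one-hot `chars` tensor of shape (5, 256) for the five characters of
# "white", and length == 5.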
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
# Downloads data at url into data_dir/basename(url). The dataset has a header
# row (color_name, r, g, b) followed by comma-separated lines.
path = tf.contrib.learn.datasets.base.maybe_download(
os.path.basename(url), data_dir, url)
# This chain of commands loads our data by:
# 1. skipping the header; (.skip(1))
# 2. parsing the subsequent lines; (.map(parse))
# 3. shuffling the data; (.shuffle(...))
  # 4. grouping the data into padded batches (.padded_batch(...)).
dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(
buffer_size=10000).padded_batch(
batch_size, padded_shapes=([None], [None, None], []))
return dataset
# pylint: disable=not-callable
class RNNColorbot(tfe.Network):
"""Multi-layer (LSTM) RNN that regresses on real-valued vector labels.
"""
def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
"""Constructs an RNNColorbot.
Args:
rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
label_dimension: the length of the labels on which to regress
keep_prob: (1 - dropout probability); dropout is applied to the outputs of
each LSTM layer
"""
super(RNNColorbot, self).__init__(name="")
self.label_dimension = label_dimension
self.keep_prob = keep_prob
# Note the calls to `track_layer` below; these calls register the layers as
# network components that house trainable variables.
self.cells = [
self.track_layer(tf.nn.rnn_cell.BasicLSTMCell(size))
for size in rnn_cell_sizes
]
self.relu = self.track_layer(
tf.layers.Dense(label_dimension, activation=tf.nn.relu, name="relu"))
def call(self, chars, sequence_length, training=False):
"""Implements the RNN logic and prediction generation.
Args:
chars: a Tensor of dimension [batch_size, time_steps, 256] holding a
batch of one-hot encoded color names
sequence_length: a Tensor of dimension [batch_size] holding the length
of each character sequence (i.e., color name)
training: whether the invocation is happening during training
Returns:
A tensor of dimension [batch_size, label_dimension] that is produced by
passing chars through a multi-layer RNN and applying a ReLU to the final
hidden state.
"""
# Transpose the first and second dimensions so that chars is of shape
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])
# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
batch_size = int(chars.shape[1])
for l in range(len(self.cells)):
cell = self.cells[l]
outputs = []
state = cell.zero_state(batch_size, tf.float32)
# Unstack the inputs to obtain a list of batches, one for each time step.
chars = tf.unstack(chars, axis=0)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
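# Editor's note: a hedged construction/usage sketch for the model above (not
# part of the original file); the hyperparameters mirror the defaults used in
# main() below.
#
#   model = RNNColorbot(rnn_cell_sizes=[256, 128], label_dimension=3,
#                       keep_prob=0.5)
#   predictions = model(chars, sequence_length, training=False)
#   # predictions has shape [batch_size, 3]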
def loss(labels, predictions):
"""Computes mean squared loss."""
return tf.reduce_mean(tf.square(predictions - labels))
def test(model, eval_data):
"""Computes the average loss on eval_data, which should be a Dataset."""
avg_loss = tfe.metrics.Mean("loss")
for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
predictions = model(chars, sequence_length, training=False)
avg_loss(loss(labels, predictions))
print("eval/loss: %.6f\n" % avg_loss.result())
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", avg_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
"""Trains model on train_data using optimizer."""
tf.train.get_or_create_global_step()
def model_loss(labels, chars, sequence_length):
predictions = model(chars, sequence_length, training=True)
loss_value = loss(labels, predictions)
tf.contrib.summary.scalar("loss", loss_value)
return loss_value
for (batch, (labels, chars, sequence_length)) in enumerate(
tfe.Iterator(train_data)):
with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
batch_model_loss = functools.partial(model_loss, labels, chars,
sequence_length)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv"
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print(tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
start = time.time()
with train_summary_writer.as_default():
train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
end = time.time()
print("train/time for epoch #%d: %.2f" % (epoch, end - start))
with test_summary_writer.as_default():
test(model, eval_data)
print("Colorbot is ready to generate colors!")
while True:
try:
color_name = six.moves.input(
"Give me a color name (or press enter to exit): ")
except EOFError:
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model(chars, length, training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
print("rgb:", rgb)
data = [[clipped_preds]]
if HAS_MATPLOTLIB:
plt.imshow(data)
plt.title(color_name)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="/tmp/rnn_colorbot/",
help="Directory to download data files and save logs.")
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="Log training loss every log_interval batches.")
parser.add_argument(
"--num_epochs", type=int, default=20, help="Number of epochs to train.")
parser.add_argument(
"--rnn_cell_sizes",
type=int,
nargs="+",
default=[256, 128],
help="List of sizes for each layer of the RNN.")
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Batch size for training and eval.")
parser.add_argument(
"--keep_probability",
type=float,
default=0.5,
help="Keep probability for dropout between layers.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.01,
help="Learning rate to be used during training.")
parser.add_argument(
"--no_gpu",
action="store_true",
default=False,
help="Disables GPU usage even if a GPU is available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/backends/backend_svg.py | 2 | 40911 | from __future__ import division
import os, codecs, base64, tempfile, urllib, gzip, cStringIO, re, sys
import numpy as np
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from matplotlib import verbose, __version__, rcParams
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import is_string_like, is_writable_file_like, maxdict
from matplotlib.colors import rgb2hex
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
from matplotlib import _png
from xml.sax.saxutils import escape as escape_xml_text
backend_version = __version__
# ----------------------------------------------------------------------
# SimpleXMLWriter class
#
# Based on an original by Fredrik Lundh, but modified here to:
# 1. Support modern Python idioms
# 2. Remove encoding support (it's handled by the file writer instead)
# 3. Support proper indentation
# 4. Minify things a little bit
# --------------------------------------------------------------------
# The SimpleXMLWriter module is
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
def escape_cdata(s):
s = s.replace(u"&", u"&")
s = s.replace(u"<", u"<")
s = s.replace(u">", u">")
return s
def escape_attrib(s):
s = s.replace(u"&", u"&")
s = s.replace(u"'", u"'")
s = s.replace(u"\"", u""")
s = s.replace(u"<", u"<")
s = s.replace(u">", u">")
return s
##
# XML writer class.
#
# @param file A file or file-like object. This object must implement
# a <b>write</b> method that takes an 8-bit string.
class XMLWriter:
def __init__(self, file):
self.__write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self.__open = 0 # true if start tag is open
self.__tags = []
self.__data = []
self.__indentation = u" " * 64
def __flush(self, indent=True):
# flush internal buffers
if self.__open:
if indent:
self.__write(u">\n")
else:
self.__write(u">")
self.__open = 0
if self.__data:
data = u''.join(self.__data)
self.__write(escape_cdata(data))
self.__data = []
## Opens a new element. Attributes can be given as keyword
# arguments, or as a string/string dictionary. The method returns
# an opaque identifier that can be passed to the <b>close</b>
# method, to close all open elements up to and including this one.
#
# @param tag Element tag.
# @param attrib Attribute dictionary. Alternatively, attributes
# can be given as keyword arguments.
# @return An element identifier.
def start(self, tag, attrib={}, **extra):
self.__flush()
tag = escape_cdata(tag)
self.__data = []
self.__tags.append(tag)
self.__write(self.__indentation[:len(self.__tags) - 1])
self.__write(u"<%s" % tag)
if attrib or extra:
attrib = attrib.copy()
attrib.update(extra)
attrib = attrib.items()
attrib.sort()
for k, v in attrib:
if not v == '':
k = escape_cdata(k)
v = escape_attrib(v)
self.__write(u" %s=\"%s\"" % (k, v))
self.__open = 1
return len(self.__tags)-1
##
# Adds a comment to the output stream.
#
# @param comment Comment text, as a Unicode string.
def comment(self, comment):
self.__flush()
self.__write(self.__indentation[:len(self.__tags)])
self.__write(u"<!-- %s -->\n" % escape_cdata(comment))
##
# Adds character data to the output stream.
#
# @param text Character data, as a Unicode string.
def data(self, text):
self.__data.append(text)
##
# Closes the current element (opened by the most recent call to
# <b>start</b>).
#
# @param tag Element tag. If given, the tag must match the start
# tag. If omitted, the current element is closed.
def end(self, tag=None, indent=True):
if tag:
assert self.__tags, "unbalanced end(%s)" % tag
assert escape_cdata(tag) == self.__tags[-1],\
"expected end(%s), got %s" % (self.__tags[-1], tag)
else:
assert self.__tags, "unbalanced end()"
tag = self.__tags.pop()
if self.__data:
self.__flush(indent)
elif self.__open:
self.__open = 0
self.__write(u"/>\n")
return
if indent:
self.__write(self.__indentation[:len(self.__tags)])
self.__write(u"</%s>\n" % tag)
##
# Closes open elements, up to (and including) the element identified
# by the given identifier.
#
# @param id Element identifier, as returned by the <b>start</b> method.
def close(self, id):
while len(self.__tags) > id:
self.end()
##
# Adds an entire element. This is the same as calling <b>start</b>,
# <b>data</b>, and <b>end</b> in sequence. The <b>text</b> argument
# can be omitted.
def element(self, tag, text=None, attrib={}, **extra):
apply(self.start, (tag, attrib), extra)
if text:
self.data(text)
self.end(indent=False)
##
# Flushes the output stream.
def flush(self):
pass # replaced by the constructor
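# --------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal use of
# the XMLWriter API defined above. start() opens an element and returns an
# identifier, element() writes a complete element, and close(id) closes
# everything opened since that identifier. The function name is hypothetical.
def _example_xmlwriter_usage():
    buf = cStringIO.StringIO()
    writer = XMLWriter(buf)
    root = writer.start(u'svg', width=u'100pt', height=u'100pt')
    writer.element(u'rect', x=u'0', y=u'0', width=u'100', height=u'100')
    writer.close(root)  # closes <svg> and anything still open inside it
    return buf.getvalue()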
# ----------------------------------------------------------------------
def generate_transform(transform_list=[]):
if len(transform_list):
output = cStringIO.StringIO()
for type, value in transform_list:
if type == 'scale' and (value == (1.0,) or value == (1.0, 1.0)):
continue
if type == 'translate' and value == (0.0, 0.0):
continue
if type == 'rotate' and value == (0.0,):
continue
if type == 'matrix' and isinstance(value, Affine2DBase):
value = value.to_values()
output.write('%s(%s)' % (type, ' '.join(str(x) for x in value)))
return output.getvalue()
return ''
def generate_css(attrib={}):
if attrib:
output = cStringIO.StringIO()
attrib = attrib.items()
attrib.sort()
for k, v in attrib:
k = escape_attrib(k)
v = escape_attrib(v)
output.write("%s:%s;" % (k, v))
return output.getvalue()
return ''
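# Illustrative examples (not in the original source) of the two helpers above:
#   generate_transform([('translate', (10, 20)), ('scale', (2.0,))])
#       -> 'translate(10 20)scale(2.0)'
#   generate_css({'fill': '#ff0000', 'stroke-width': '1.0'})
#       -> 'fill:#ff0000;stroke-width:1.0;'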
_capstyle_d = {'projecting': 'square', 'butt': 'butt', 'round': 'round'}
class RendererSVG(RendererBase):
FONT_SCALE = 100.0
fontd = maxdict(50)
def __init__(self, width, height, svgwriter, basename=None):
self.width = width
self.height = height
self.writer = XMLWriter(svgwriter)
self._groupd = {}
if not rcParams['svg.image_inline']:
assert basename is not None
self.basename = basename
self._imaged = {}
self._clipd = {}
self._char_defs = {}
self._markers = {}
self._path_collection_id = 0
self._imaged = {}
self._hatchd = {}
self._has_gouraud = False
self._n_gradients = 0
self._fonts = {}
self.mathtext_parser = MathTextParser('SVG')
RendererBase.__init__(self)
self._glyph_map = dict()
svgwriter.write(svgProlog)
self._start_id = self.writer.start(
'svg',
width='%ipt' % width, height='%ipt' % height,
viewBox='0 0 %i %i' % (width, height),
xmlns="http://www.w3.org/2000/svg",
version="1.1",
attrib={'xmlns:xlink': "http://www.w3.org/1999/xlink"})
self._write_default_style()
def finalize(self):
self._write_clips()
self._write_hatches()
self._write_svgfonts()
self.writer.close(self._start_id)
def _write_default_style(self):
writer = self.writer
default_style = generate_css({
'stroke-linejoin': 'round',
'stroke-linecap': 'square'})
writer.start('defs')
writer.start('style', type='text/css')
writer.data('*{%s}\n' % default_style)
writer.end('style')
writer.end('defs')
def _make_id(self, type, content):
return '%s%s' % (type, md5(str(content)).hexdigest()[:10])
def _make_flip_transform(self, transform):
return (transform +
Affine2D()
.scale(1.0, -1.0)
.translate(0.0, self.height))
def _get_font(self, prop):
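        # Two-level cache: look up first by the FontProperties hash, then by
        # the resolved font file name, so different properties that resolve
        # to the same file share a single FT2Font instance.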
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _get_hatch(self, gc, rgbFace):
"""
        Return the id of the hatch pattern for this GraphicsContext and face
        color, registering a new pattern definition if one does not exist yet.
"""
dictkey = (gc.get_hatch(), rgbFace, gc.get_rgb())
oid = self._hatchd.get(dictkey)
if oid is None:
oid = self._make_id('h', dictkey)
self._hatchd[dictkey] = ((gc.get_hatch_path(), rgbFace, gc.get_rgb()), oid)
else:
_, oid = oid
return oid
def _write_hatches(self):
if not len(self._hatchd):
return
HATCH_SIZE = 72
writer = self.writer
writer.start('defs')
for ((path, face, stroke), oid) in self._hatchd.values():
writer.start(
'pattern',
id=oid,
patternUnits="userSpaceOnUse",
x="0", y="0", width=str(HATCH_SIZE), height=str(HATCH_SIZE))
path_data = self._convert_path(
path,
Affine2D().scale(HATCH_SIZE).scale(1.0, -1.0).translate(0, HATCH_SIZE),
simplify=False)
if face is None:
fill = 'none'
else:
fill = rgb2hex(face)
writer.element(
'rect',
x="0", y="0", width=str(HATCH_SIZE+1), height=str(HATCH_SIZE+1),
fill=fill)
writer.element(
'path',
d=path_data,
style=generate_css({
'fill': rgb2hex(stroke),
'stroke': rgb2hex(stroke),
'stroke-width': str(1.0),
'stroke-linecap': 'butt',
'stroke-linejoin': 'miter'
})
)
writer.end('pattern')
writer.end('defs')
def _get_style_dict(self, gc, rgbFace):
"""
        Return the style dictionary generated from the GraphicsContext
        and rgbFace.
"""
attrib = {}
if gc.get_hatch() is not None:
attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace)
else:
if rgbFace is None:
attrib['fill'] = 'none'
elif tuple(rgbFace[:3]) != (0, 0, 0):
attrib['fill'] = rgb2hex(rgbFace)
if gc.get_alpha() != 1.0:
attrib['opacity'] = str(gc.get_alpha())
offset, seq = gc.get_dashes()
if seq is not None:
attrib['stroke-dasharray'] = ','.join(['%f' % val for val in seq])
attrib['stroke-dashoffset'] = str(float(offset))
linewidth = gc.get_linewidth()
if linewidth:
attrib['stroke'] = rgb2hex(gc.get_rgb())
if linewidth != 1.0:
attrib['stroke-width'] = str(linewidth)
if gc.get_joinstyle() != 'round':
attrib['stroke-linejoin'] = gc.get_joinstyle()
if gc.get_capstyle() != 'projecting':
attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]
return attrib
def _get_style(self, gc, rgbFace):
return generate_css(self._get_style_dict(gc, rgbFace))
def _get_clip(self, gc):
cliprect = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
clippath_trans = self._make_flip_transform(clippath_trans)
dictkey = (id(clippath), str(clippath_trans))
elif cliprect is not None:
x, y, w, h = cliprect.bounds
y = self.height-(y+h)
dictkey = (x, y, w, h)
else:
return None
clip = self._clipd.get(dictkey)
if clip is None:
oid = self._make_id('p', dictkey)
if clippath is not None:
self._clipd[dictkey] = ((clippath, clippath_trans), oid)
else:
self._clipd[dictkey] = (dictkey, oid)
else:
clip, oid = clip
return oid
def _write_clips(self):
if not len(self._clipd):
return
writer = self.writer
writer.start('defs')
for clip, oid in self._clipd.values():
writer.start('clipPath', id=oid)
if len(clip) == 2:
clippath, clippath_trans = clip
path_data = self._convert_path(clippath, clippath_trans, simplify=False)
writer.element('path', d=path_data)
else:
x, y, w, h = clip
writer.element('rect', x=str(x), y=str(y), width=str(w), height=str(h))
writer.end('clipPath')
writer.end('defs')
def _write_svgfonts(self):
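        # Only active when rcParams['svg.fonttype'] == 'svgfont': each
        # character used in text output is embedded as an SVG <glyph>
        # outline, so the document does not depend on locally installed fonts.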
if not rcParams['svg.fonttype'] == 'svgfont':
return
writer = self.writer
writer.start('defs')
for font_fname, chars in self._fonts.items():
font = FT2Font(font_fname)
font.set_size(72, 72)
sfnt = font.get_sfnt()
writer.start('font', id=sfnt[(1, 0, 0, 4)])
writer.element(
'font-face',
attrib={
'font-family': font.family_name,
'font-style': font.style_name.lower(),
'units-per-em': '72',
'bbox': ' '.join(str(x / 64.0) for x in font.bbox)})
for char in chars:
glyph = font.load_char(char, flags=LOAD_NO_HINTING)
verts, codes = font.get_path()
path = Path(verts, codes)
path_data = self._convert_path(path)
# name = font.get_glyph_name(char)
writer.element(
'glyph',
d=path_data,
attrib={
# 'glyph-name': name,
'unicode': unichr(char),
'horiz-adv-x': str(glyph.linearHoriAdvance / 65536.0)})
writer.end('font')
writer.end('defs')
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group.
"""
if gid:
self.writer.start('g', id=gid)
else:
self._groupd[s] = self._groupd.get(s, 0) + 1
self.writer.start('g', id="%s_%d" % (s, self._groupd[s]))
def close_group(self, s):
self.writer.end('g')
def option_image_nocomposite(self):
"""
if svg.image_noscale is True, compositing multiple images into one is prohibited
"""
return rcParams['svg.image_noscale']
def _convert_path(self, path, transform=None, clip=None, simplify=None):
if clip:
clip = (0.0, 0.0, self.width, self.height)
else:
clip = None
return _path.convert_to_svg(path, transform, clip, simplify, 6)
def draw_path(self, gc, path, transform, rgbFace=None):
trans_and_flip = self._make_flip_transform(transform)
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
path_data = self._convert_path(
path, trans_and_flip, clip=clip, simplify=simplify)
attrib = {}
attrib['style'] = self._get_style(gc, rgbFace)
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
if gc.get_url() is not None:
self.writer.start('a', {'xlink:href': gc.get_url()})
self.writer.element('path', d=path_data, attrib=attrib)
if gc.get_url() is not None:
            self.writer.end('a')
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if not len(path.vertices):
return
writer = self.writer
path_data = self._convert_path(
marker_path,
marker_trans + Affine2D().scale(1.0, -1.0),
simplify=False)
style = self._get_style_dict(gc, rgbFace)
dictkey = (path_data, generate_css(style))
oid = self._markers.get(dictkey)
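        # The shared marker path written into <defs> keeps only the
        # stroke-related properties; the full style (fill, opacity) is
        # re-applied on every <use> element below.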
for key in style.keys():
if not key.startswith('stroke'):
del style[key]
style = generate_css(style)
if oid is None:
oid = self._make_id('m', dictkey)
writer.start('defs')
writer.element('path', id=oid, d=path_data, style=style)
writer.end('defs')
self._markers[dictkey] = oid
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
writer.start('g', attrib=attrib)
trans_and_flip = self._make_flip_transform(trans)
attrib = {'xlink:href': '#%s' % oid}
for vertices, code in path.iter_segments(trans_and_flip, simplify=False):
if len(vertices):
x, y = vertices[-2:]
attrib['x'] = str(x)
attrib['y'] = str(y)
attrib['style'] = self._get_style(gc, rgbFace)
writer.element('use', attrib=attrib)
writer.end('g')
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
writer = self.writer
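        # Write each distinct path of the collection once into <defs>, then
        # reference it with one <use> element per offset; this keeps the SVG
        # output compact for large collections.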
path_codes = []
writer.start('defs')
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
d = self._convert_path(path, transform, simplify=False)
oid = 'C%x_%x_%s' % (self._path_collection_id, i,
self._make_id('', d))
writer.element('path', id=oid, d=d)
path_codes.append(oid)
writer.end('defs')
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, path_codes, offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
clipid = self._get_clip(gc0)
url = gc0.get_url()
if url is not None:
writer.start('a', attrib={'xlink:href': url})
if clipid is not None:
writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
attrib = {
'xlink:href': '#%s' % path_id,
'x': str(xo),
'y': str(self.height - yo),
'style': self._get_style(gc0, rgbFace)
}
writer.element('use', attrib=attrib)
if clipid is not None:
writer.end('g')
if url is not None:
writer.end('a')
self._path_collection_id += 1
def draw_gouraud_triangle(self, gc, points, colors, trans):
# This uses a method described here:
#
# http://www.svgopen.org/2005/papers/Converting3DFaceToSVG/index.html
#
# that uses three overlapping linear gradients to simulate a
# Gouraud triangle. Each gradient goes from fully opaque in
# one corner to fully transparent along the opposite edge.
# The line between the stop points is perpendicular to the
# opposite edge. Underlying these three gradients is a solid
# triangle whose color is the average of all three points.
writer = self.writer
if not self._has_gouraud:
self._has_gouraud = True
writer.start(
'filter',
id='colorAdd')
writer.element(
'feComposite',
attrib={'in': 'SourceGraphic'},
in2='BackgroundImage',
operator='arithmetic',
k2="1", k3="1")
writer.end('filter')
avg_color = np.sum(colors[:, :], axis=0) / 3.0
# Just skip fully-transparent triangles
if avg_color[-1] == 0.0:
return
trans_and_flip = self._make_flip_transform(trans)
tpoints = trans_and_flip.transform(points)
writer.start('defs')
for i in range(3):
x1, y1 = points[i]
x2, y2 = points[(i + 1) % 3]
x3, y3 = points[(i + 2) % 3]
c = colors[i][:]
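            # Find (xb, yb), the foot of the perpendicular dropped from the
            # opaque corner (x1, y1) onto the opposite edge through
            # (x2, y2)-(x3, y3); the first two branches handle vertical and
            # horizontal edges, where the slope formula would divide by zero.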
if x2 == x3:
xb = x2
yb = y1
elif y2 == y3:
xb = x1
yb = y2
else:
m1 = (y2 - y3) / (x2 - x3)
b1 = y2 - (m1 * x2)
m2 = -(1.0 / m1)
b2 = y1 - (m2 * x1)
xb = (-b1 + b2) / (m1 - m2)
yb = m2 * xb + b2
writer.start(
'linearGradient',
id="GR%x_%d" % (self._n_gradients, i),
x1=str(x1), y1=str(y1), x2=str(xb), y2=str(yb))
writer.element(
'stop',
offset='0',
style=generate_css({'stop-color': rgb2hex(c),
'stop-opacity': str(c[-1])}))
writer.element(
'stop',
offset='1',
style=generate_css({'stop-color': rgb2hex(c),
'stop-opacity': "0"}))
writer.end('linearGradient')
writer.element(
'polygon',
id='GT%x' % self._n_gradients,
points=" ".join([str(x) for x in x1,y1,x2,y2,x3,y3]))
writer.end('defs')
avg_color = np.sum(colors[:, :], axis=0) / 3.0
href = '#GT%x' % self._n_gradients
writer.element(
'use',
attrib={'xlink:href': href,
'fill': rgb2hex(avg_color),
'fill-opacity': str(avg_color[-1])})
for i in range(3):
writer.element(
'use',
attrib={'xlink:href': href,
'fill': 'url(#GR%x_%d)' % (self._n_gradients, i),
'fill-opacity': '1',
'filter': 'url(#colorAdd)'})
self._n_gradients += 1
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
self.writer.start('g', attrib=attrib)
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
self.writer.end('g')
def option_scale_image(self):
return True
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
# Can't apply clip-path directly to the image because the
# image has a transformation, which would also be applied
# to the clip-path
self.writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
trans = [1,0,0,1,0,0]
if rcParams['svg.image_noscale']:
trans = list(im.get_matrix())
trans[5] = -trans[5]
attrib['transform'] = generate_transform([('matrix', tuple(trans))])
assert trans[1] == 0
assert trans[2] == 0
numrows, numcols = im.get_size()
im.reset_matrix()
im.set_interpolation(0)
im.resize(numcols, numrows)
h,w = im.get_size_out()
url = getattr(im, '_url', None)
if url is not None:
self.writer.start('a', attrib={'xlink:href': url})
if rcParams['svg.image_inline']:
stringio = cStringIO.StringIO()
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, stringio)
im.flipud_out()
attrib['xlink:href'] = ("data:image/png;base64,\n" +
base64.encodestring(stringio.getvalue()))
else:
self._imaged[self.basename] = self._imaged.get(self.basename,0) + 1
filename = '%s.image%d.png'%(self.basename, self._imaged[self.basename])
verbose.report( 'Writing image file for inclusion: %s' % filename)
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, filename)
im.flipud_out()
attrib['xlink:href'] = filename
alpha = gc.get_alpha()
if alpha != 1.0:
attrib['opacity'] = str(alpha)
if transform is None:
self.writer.element(
'image',
x=str(x/trans[0]), y=str((self.height-y)/trans[3]-h),
width=str(w), height=str(h),
attrib=attrib)
else:
flipped = self._make_flip_transform(transform)
attrib['transform'] = generate_transform(
[('matrix', flipped.to_values())])
self.writer.element(
'image',
x=str(x), y=str(y+dy), width=str(dx), height=str(-dy),
attrib=attrib)
if url is not None:
self.writer.end('a')
if clipid is not None:
self.writer.end('g')
def _adjust_char_id(self, char_id):
return char_id.replace("%20", "_")
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
        Draw the text by converting it to paths using the textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
writer = self.writer
writer.comment(s)
glyph_map=self._glyph_map
text2path = self._text2path
color = rgb2hex(gc.get_rgb())
fontsize = prop.get_size_in_points()
style = {}
if color != '#000000':
style['fill'] = color
if gc.get_alpha() != 1.0:
style['opacity'] = str(gc.get_alpha())
if not ismath:
font = text2path._get_font(prop)
_glyphs = text2path.get_glyphs_with_font(
font, s, glyph_map=glyph_map, return_new_glyphs_only=True)
glyph_info, glyph_map_new, rects = _glyphs
y -= ((font.get_descent() / 64.0) *
(prop.get_size_in_points() / text2path.FONT_SCALE))
if glyph_map_new:
writer.start('defs')
for char_id, glyph_path in glyph_map_new.iteritems():
path = Path(*glyph_path)
path_data = self._convert_path(path, simplify=False)
writer.element('path', id=char_id, d=path_data)
writer.end('defs')
glyph_map.update(glyph_map_new)
attrib = {}
attrib['style'] = generate_css(style)
font_scale = fontsize / text2path.FONT_SCALE
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,)),
('scale', (font_scale, -font_scale))])
writer.start('g', attrib=attrib)
for glyph_id, xposition, yposition, scale in glyph_info:
attrib={'xlink:href': '#%s' % glyph_id}
if xposition != 0.0:
attrib['x'] = str(xposition)
if yposition != 0.0:
attrib['y'] = str(yposition)
writer.element(
'use',
attrib=attrib)
writer.end('g')
else:
if ismath == "TeX":
_glyphs = text2path.get_glyphs_tex(prop, s, glyph_map=glyph_map,
return_new_glyphs_only=True)
else:
_glyphs = text2path.get_glyphs_mathtext(prop, s, glyph_map=glyph_map,
return_new_glyphs_only=True)
glyph_info, glyph_map_new, rects = _glyphs
            # we store the character glyphs w/o flipping. Instead, the
            # coordinates will be flipped when these characters are
            # used.
if glyph_map_new:
writer.start('defs')
for char_id, glyph_path in glyph_map_new.iteritems():
char_id = self._adjust_char_id(char_id)
# Some characters are blank
if not len(glyph_path[0]):
path_data = ""
else:
path = Path(*glyph_path)
path_data = self._convert_path(path, simplify=False)
writer.element('path', id=char_id, d=path_data)
writer.end('defs')
glyph_map.update(glyph_map_new)
attrib = {}
font_scale = fontsize / text2path.FONT_SCALE
attrib['style'] = generate_css(style)
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,)),
('scale', (font_scale, - font_scale))])
writer.start('g', attrib=attrib)
for char_id, xposition, yposition, scale in glyph_info:
char_id = self._adjust_char_id(char_id)
writer.element(
'use',
transform=generate_transform([
('translate', (xposition, yposition)),
('scale', (scale,)),
]),
attrib={'xlink:href': '#%s' % char_id})
for verts, codes in rects:
path = Path(verts, codes)
path_data = self._convert_path(path, simplify=False)
writer.element('path', d=path_data)
writer.end('g')
def _draw_text_as_text(self, gc, x, y, s, prop, angle, ismath):
writer = self.writer
color = rgb2hex(gc.get_rgb())
style = {}
if color != '#000000':
style['fill'] = color
if gc.get_alpha() != 1.0:
style['opacity'] = str(gc.get_alpha())
if not ismath:
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
y -= font.get_descent() / 64.0
fontsize = prop.get_size_in_points()
fontfamily = font.family_name
fontstyle = prop.get_style()
attrib = {}
# Must add "px" to workaround a Firefox bug
style['font-size'] = str(fontsize) + 'px'
style['font-family'] = str(fontfamily)
style['font-style'] = prop.get_style().lower()
attrib['style'] = generate_css(style)
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,))])
writer.element('text', s, attrib=attrib)
if rcParams['svg.fonttype'] == 'svgfont':
fontset = self._fonts.setdefault(font.fname, set())
for c in s:
fontset.add(ord(c))
else:
writer.comment(s)
width, height, descent, svg_elements, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
svg_glyphs = svg_elements.svg_glyphs
svg_rects = svg_elements.svg_rects
attrib = {}
attrib['style'] = generate_css(style)
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,))])
# Apply attributes to 'g', not 'text', because we likely
# have some rectangles as well with the same style and
# transformation
writer.start('g', attrib=attrib)
writer.start('text')
# Sort the characters by font, and output one tspan for
# each
spans = {}
for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
style = generate_css({
# Must add "px" to work around a Firefox bug
'font-size': str(fontsize) + 'px',
'font-family': font.family_name,
'font-style': font.style_name.lower()})
if thetext == 32:
thetext = 0xa0 # non-breaking space
spans.setdefault(style, []).append((new_x, -new_y, thetext))
if rcParams['svg.fonttype'] == 'svgfont':
for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
fontset = self._fonts.setdefault(font.fname, set())
fontset.add(thetext)
for style, chars in spans.items():
chars.sort()
same_y = True
if len(chars) > 1:
last_y = chars[0][1]
for i in xrange(1, len(chars)):
if chars[i][1] != last_y:
same_y = False
break
if same_y:
ys = str(chars[0][1])
else:
ys = ' '.join(str(c[1]) for c in chars)
attrib = {
'style': style,
'x': ' '.join(str(c[0]) for c in chars),
'y': ys
}
writer.element(
'tspan',
''.join(unichr(c[2]) for c in chars),
attrib=attrib)
writer.end('text')
if len(svg_rects):
for x, y, width, height in svg_rects:
writer.element(
'rect',
x=str(x), y=str(-y + height),
width=str(width), height=str(height)
)
writer.end('g')
def draw_tex(self, gc, x, y, s, prop, angle):
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath):
clipid = self._get_clip(gc)
if clipid is not None:
# Cannot apply clip-path directly to the text, because
            # it has a transformation
self.writer.start(
'g', attrib={'clip-path': 'url(#%s)' % clipid})
if rcParams['svg.fonttype'] == 'path':
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
else:
self._draw_text_as_text(gc, x, y, s, prop, angle, ismath)
if clipid is not None:
self.writer.end('g')
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
return self._text2path.get_text_width_height_descent(s, prop, ismath)
class FigureCanvasSVG(FigureCanvasBase):
filetypes = {'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'}
def print_svg(self, filename, *args, **kwargs):
if is_string_like(filename):
fh_to_close = svgwriter = codecs.open(filename, 'w', 'utf-8')
elif is_writable_file_like(filename):
svgwriter = codecs.getwriter('utf-8')(filename)
fh_to_close = None
else:
raise ValueError("filename must be a path or a file-like object")
return self._print_svg(filename, svgwriter, fh_to_close, **kwargs)
def print_svgz(self, filename, *args, **kwargs):
if is_string_like(filename):
gzipwriter = gzip.GzipFile(filename, 'w')
fh_to_close = svgwriter = codecs.getwriter('utf-8')(gzipwriter)
elif is_writable_file_like(filename):
fh_to_close = gzipwriter = gzip.GzipFile(fileobj=filename, mode='w')
svgwriter = codecs.getwriter('utf-8')(gzipwriter)
else:
raise ValueError("filename must be a path or a file-like object")
return self._print_svg(filename, svgwriter, fh_to_close)
def _print_svg(self, filename, svgwriter, fh_to_close=None, **kwargs):
self.figure.set_dpi(72.0)
width, height = self.figure.get_size_inches()
w, h = width*72, height*72
if rcParams['svg.image_noscale']:
renderer = RendererSVG(w, h, svgwriter, filename)
else:
# setting mixed renderer dpi other than 72 results in
# incorrect size of the rasterized image. It seems that the
# svg internally uses fixed dpi of 72 and seems to cause
# the problem. I hope someone who knows the svg backends
# take a look at this problem. Meanwhile, the dpi
# parameter is ignored and image_dpi is fixed at 72. - JJL
#image_dpi = kwargs.pop("dpi", 72)
image_dpi = 72
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(self.figure,
width, height, image_dpi, RendererSVG(w, h, svgwriter, filename),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
if fh_to_close is not None:
svgwriter.close()
def get_default_filetype(self):
return 'svg'
class FigureManagerSVG(FigureManagerBase):
pass
FigureManager = FigureManagerSVG
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasSVG(thisFig)
manager = FigureManagerSVG(canvas, num)
return manager
svgProlog = u"""\
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Created with matplotlib (http://matplotlib.sourceforge.net/) -->
"""
| gpl-3.0 |