repo_name (stringlengths 6..112) | path (stringlengths 4..204) | copies (stringlengths 1..3) | size (stringlengths 4..6) | content (stringlengths 714..810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/rcsetup.py | 69 | 23344 | """
The rcsetup module contains the default values and the validation code for
customization using matplotlib's rc settings.
Each rc setting is assigned a default value and a function used to validate any
attempted changes to that setting. The default values and validation functions
are defined in the rcsetup module, and are used to construct the rcParams global
object which stores the settings and is referenced throughout matplotlib.
These default values should be consistent with the default matplotlibrc file
that actually reflects the values given here. Any additions or deletions to the
parameter set listed here should also be reflected in the
:file:`matplotlibrc.template` in matplotlib's root source directory.
"""
import os
import warnings
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib.colors import is_color_like
#interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'fltkagg', 'qtagg', 'qt4agg',
# 'tkagg', 'wx', 'wxagg', 'cocoaagg']
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'FltkAgg', 'MacOSX',
'QtAgg', 'Qt4Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg']
non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk',
'pdf', 'ps', 'svg', 'template']
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
def __init__(self, key, valid, ignorecase=False):
'valid is a list of legal strings'
self.key = key
self.ignorecase = ignorecase
def func(s):
if ignorecase: return s.lower()
else: return s
self.valid = dict([(func(k),k) for k in valid])
def __call__(self, s):
if self.ignorecase: s = s.lower()
if s in self.valid: return self.valid[s]
raise ValueError('Unrecognized %s string "%s": valid strings are %s'
% (self.key, s, self.valid.values()))
def validate_path_exists(s):
'If s is an existing path, return s; otherwise raise RuntimeError'
if os.path.exists(s): return s
else:
raise RuntimeError('"%s" should be a path but it does not exist'%s)
def validate_bool(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_bool_maybe_none(b):
'Convert b to a boolean or None, or raise'
if type(b) is str:
b = b.lower()
if b=='none': return None
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_float(s):
'convert s to float or raise'
try: return float(s)
except ValueError:
raise ValueError('Could not convert "%s" to float' % s)
def validate_int(s):
'convert s to int or raise'
try: return int(s)
except ValueError:
raise ValueError('Could not convert "%s" to int' % s)
def validate_fonttype(s):
'confirm that this is a PostScript or PDF font type that we know how to convert to'
fonttypes = { 'type3': 3,
'truetype': 42 }
try:
fonttype = validate_int(s)
except ValueError:
if s.lower() in fonttypes.keys():
return fonttypes[s.lower()]
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.keys())
else:
if fonttype not in fonttypes.values():
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.values())
return fonttype
#validate_backend = ValidateInStrings('backend', all_backends, ignorecase=True)
_validate_standard_backends = ValidateInStrings('backend', all_backends, ignorecase=True)
def validate_backend(s):
if s.startswith('module://'): return s
else: return _validate_standard_backends(s)
validate_numerix = ValidateInStrings('numerix',[
'Numeric','numarray','numpy',
], ignorecase=True)
validate_toolbar = ValidateInStrings('toolbar',[
'None','classic','toolbar2',
], ignorecase=True)
def validate_autolayout(v):
if v:
warnings.warn("figure.autolayout is not currently supported")
class validate_nseq_float:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n floats or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [float(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to floats')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [float(val) for val in s]
class validate_nseq_int:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n ints or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [int(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to ints')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [int(val) for val in s]
def validate_color(s):
'return a valid color arg'
if s.lower() == 'none':
return 'None'
if is_color_like(s):
return s
stmp = '#' + s
if is_color_like(stmp):
return stmp
# If it is still valid, it must be a tuple.
colorarg = s
msg = ''
if s.find(',')>=0:
# get rid of grouping symbols
stmp = ''.join([ c for c in s if c.isdigit() or c=='.' or c==','])
vals = stmp.split(',')
if len(vals)!=3:
msg = '\nColor tuples must be length 3'
else:
try:
colorarg = [float(val) for val in vals]
except ValueError:
msg = '\nCould not convert all entries to floats'
if not msg and is_color_like(colorarg):
return colorarg
raise ValueError('%s does not look like a color arg%s'%(s, msg))
def validate_stringlist(s):
'return a list'
if type(s) is str:
return [ v.strip() for v in s.split(',') ]
else:
assert type(s) in [list,tuple]
return [ str(v) for v in s ]
validate_orientation = ValidateInStrings('orientation',[
'landscape', 'portrait',
])
def validate_aspect(s):
if s in ('auto', 'equal'):
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid aspect specification')
def validate_fontsize(s):
if type(s) is str:
s = s.lower()
if s in ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large',
'xx-large', 'smaller', 'larger']:
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid font size')
def validate_font_properties(s):
parse_fontconfig_pattern(s)
return s
validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'stixsans', 'custom'])
validate_verbose = ValidateInStrings('verbose',[
'silent', 'helpful', 'debug', 'debug-annoying',
])
validate_cairo_format = ValidateInStrings('cairo_format',
['png', 'ps', 'pdf', 'svg'],
ignorecase=True)
validate_ps_papersize = ValidateInStrings('ps_papersize',[
'auto', 'letter', 'legal', 'ledger',
'a0', 'a1', 'a2','a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
], ignorecase=True)
def validate_ps_distiller(s):
if type(s) is str:
s = s.lower()
if s in ('none',None):
return None
elif s in ('false', False):
return False
elif s in ('ghostscript', 'xpdf'):
return s
else:
raise ValueError('matplotlibrc ps.usedistiller must either be none, ghostscript or xpdf')
validate_joinstyle = ValidateInStrings('joinstyle',['miter', 'round', 'bevel'], ignorecase=True)
validate_capstyle = ValidateInStrings('capstyle',['butt', 'round', 'projecting'], ignorecase=True)
validate_negative_linestyle = ValidateInStrings('negative_linestyle',['solid', 'dashed'], ignorecase=True)
def validate_negative_linestyle_legacy(s):
try:
res = validate_negative_linestyle(s)
return res
except ValueError:
dashes = validate_nseq_float(2)(s)
warnings.warn("Deprecated negative_linestyle specification; use 'solid' or 'dashed'")
return (0, dashes) # (offset, (solid, blank))
validate_legend_loc = ValidateInStrings('legend_loc',[
'best',
'upper right',
'upper left',
'lower left',
'lower right',
'right',
'center left',
'center right',
'lower center',
'upper center',
'center',
], ignorecase=True)
class ValidateInterval:
"""
Value must be in interval
"""
def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
self.vmin = vmin
self.vmax = vmax
self.cmin = closedmin
self.cmax = closedmax
def __call__(self, s):
try: s = float(s)
except: raise RuntimeError('Value must be a float; found "%s"'%s)
if self.cmin and s<self.vmin:
raise RuntimeError('Value must be >= %f; found "%f"'%(self.vmin, s))
elif not self.cmin and s<=self.vmin:
raise RuntimeError('Value must be > %f; found "%f"'%(self.vmin, s))
if self.cmax and s>self.vmax:
raise RuntimeError('Value must be <= %f; found "%f"'%(self.vmax, s))
elif not self.cmax and s>=self.vmax:
raise RuntimeError('Value must be < %f; found "%f"'%(self.vmax, s))
return s
# a map from key -> value, converter
defaultParams = {
'backend' : ['Agg', validate_backend], # agg is certainly present
'backend_fallback' : [True, validate_bool], # agg is certainly present
'numerix' : ['numpy', validate_numerix],
'maskedarray' : [False, validate_bool],
'toolbar' : ['toolbar2', validate_toolbar],
'datapath' : [None, validate_path_exists], # handled by _get_data_path_cached
'units' : [False, validate_bool],
'interactive' : [False, validate_bool],
'timezone' : ['UTC', str],
# the verbosity setting
'verbose.level' : ['silent', validate_verbose],
'verbose.fileo' : ['sys.stdout', str],
# line props
'lines.linewidth' : [1.0, validate_float], # line width in points
'lines.linestyle' : ['-', str], # solid line
'lines.color' : ['b', validate_color], # blue
'lines.marker' : ['None', str], # no marker by default
'lines.markeredgewidth' : [0.5, validate_float],
'lines.markersize' : [6, validate_float], # markersize, in points
'lines.antialiased' : [True, validate_bool], # antialiased (no jaggies)
'lines.dash_joinstyle' : ['miter', validate_joinstyle],
'lines.solid_joinstyle' : ['miter', validate_joinstyle],
'lines.dash_capstyle' : ['butt', validate_capstyle],
'lines.solid_capstyle' : ['projecting', validate_capstyle],
# patch props
'patch.linewidth' : [1.0, validate_float], # line width in points
'patch.edgecolor' : ['k', validate_color], # black
'patch.facecolor' : ['b', validate_color], # blue
'patch.antialiased' : [True, validate_bool], # antialiased (no jaggies)
# font props
'font.family' : ['sans-serif', str], # used by text object
'font.style' : ['normal', str], #
'font.variant' : ['normal', str], #
'font.stretch' : ['normal', str], #
'font.weight' : ['normal', str], #
'font.size' : [12.0, validate_float], #
'font.serif' : [['Bitstream Vera Serif', 'DejaVu Serif',
'New Century Schoolbook', 'Century Schoolbook L',
'Utopia', 'ITC Bookman', 'Bookman',
'Nimbus Roman No9 L','Times New Roman',
'Times','Palatino','Charter','serif'],
validate_stringlist],
'font.sans-serif' : [['Bitstream Vera Sans', 'DejaVu Sans',
'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
validate_stringlist],
'font.cursive' : [['Apple Chancery','Textile','Zapf Chancery',
'Sand','cursive'], validate_stringlist],
'font.fantasy' : [['Comic Sans MS','Chicago','Charcoal','Impact',
'Western','fantasy'], validate_stringlist],
'font.monospace' : [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono',
'Andale Mono', 'Nimbus Mono L', 'Courier New',
'Courier','Fixed', 'Terminal','monospace'],
validate_stringlist],
# text props
'text.color' : ['k', validate_color], # black
'text.usetex' : [False, validate_bool],
'text.latex.unicode' : [False, validate_bool],
'text.latex.preamble' : [[''], validate_stringlist],
'text.dvipnghack' : [None, validate_bool_maybe_none],
'text.fontstyle' : ['normal', str],
'text.fontangle' : ['normal', str],
'text.fontvariant' : ['normal', str],
'text.fontweight' : ['normal', str],
'text.fontsize' : ['medium', validate_fontsize],
'mathtext.cal' : ['cursive', validate_font_properties],
'mathtext.rm' : ['serif', validate_font_properties],
'mathtext.tt' : ['monospace', validate_font_properties],
'mathtext.it' : ['serif:italic', validate_font_properties],
'mathtext.bf' : ['serif:bold', validate_font_properties],
'mathtext.sf' : ['sans\-serif', validate_font_properties],
'mathtext.fontset' : ['cm', validate_fontset],
'mathtext.fallback_to_cm' : [True, validate_bool],
'image.aspect' : ['equal', validate_aspect], # equal, auto, a number
'image.interpolation' : ['bilinear', str],
'image.cmap' : ['jet', str], # one of gray, jet, etc
'image.lut' : [256, validate_int], # lookup table
'image.origin' : ['upper', str], # 'upper' or 'lower'
'image.resample' : [False, validate_bool],
'contour.negative_linestyle' : ['dashed', validate_negative_linestyle_legacy],
# axes props
'axes.axisbelow' : [False, validate_bool],
'axes.hold' : [True, validate_bool],
'axes.facecolor' : ['w', validate_color], # background color; white
'axes.edgecolor' : ['k', validate_color], # edge color; black
'axes.linewidth' : [1.0, validate_float], # edge linewidth
'axes.titlesize' : ['large', validate_fontsize], # fontsize of the axes title
'axes.grid' : [False, validate_bool], # display grid or not
'axes.labelsize' : ['medium', validate_fontsize], # fontsize of the x and y labels
'axes.labelcolor' : ['k', validate_color], # color of axis label
'axes.formatter.limits' : [[-7, 7], validate_nseq_int(2)],
# use scientific notation if log10
# of the axis range is smaller than the
# first or larger than the second
'axes.unicode_minus' : [True, validate_bool],
'polaraxes.grid' : [True, validate_bool], # display polar grid or not
#legend properties
'legend.fancybox' : [False,validate_bool],
'legend.loc' : ['upper right',validate_legend_loc], # at some point, this should be changed to 'best'
'legend.isaxes' : [True,validate_bool], # this option is internally ignored - it never served any useful purpose
'legend.numpoints' : [2, validate_int], # the number of points in the legend line
'legend.fontsize' : ['large', validate_fontsize],
'legend.pad' : [0, validate_float], # was 0.2, deprecated; the fractional whitespace inside the legend border
'legend.borderpad' : [0.4, validate_float], # units are fontsize
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.02, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
'legend.labelspacing' : [0.5, validate_float], # the vertical space between the legend entries
'legend.handlelength' : [2., validate_float], # the length of the legend lines
'legend.handletextpad' : [.8, validate_float], # the space between the legend line and legend text
'legend.borderaxespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.columnspacing' : [2., validate_float], # the spacing between columns
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
# tick properties
'xtick.major.size' : [4, validate_float], # major xtick size in points
'xtick.minor.size' : [2, validate_float], # minor xtick size in points
'xtick.major.pad' : [4, validate_float], # distance to label in points
'xtick.minor.pad' : [4, validate_float], # distance to label in points
'xtick.color' : ['k', validate_color], # color of the xtick labels
'xtick.labelsize' : ['medium', validate_fontsize], # fontsize of the xtick labels
'xtick.direction' : ['in', str], # direction of xticks
'ytick.major.size' : [4, validate_float], # major ytick size in points
'ytick.minor.size' : [2, validate_float], # minor ytick size in points
'ytick.major.pad' : [4, validate_float], # distance to label in points
'ytick.minor.pad' : [4, validate_float], # distance to label in points
'ytick.color' : ['k', validate_color], # color of the ytick labels
'ytick.labelsize' : ['medium', validate_fontsize], # fontsize of the ytick labels
'ytick.direction' : ['in', str], # direction of yticks
'grid.color' : ['k', validate_color], # grid color
'grid.linestyle' : [':', str], # dotted
'grid.linewidth' : [0.5, validate_float], # in points
# figure props
# figure size in inches: width by height
'figure.figsize' : [ [8.0,6.0], validate_nseq_float(2)],
'figure.dpi' : [ 80, validate_float], # DPI
'figure.facecolor' : [ '0.75', validate_color], # facecolor; scalar gray
'figure.edgecolor' : [ 'w', validate_color], # edgecolor; white
'figure.autolayout' : [ False, validate_autolayout],
'figure.subplot.left' : [0.125, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.right' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.bottom' : [0.1, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.top' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.wspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'figure.subplot.hspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'savefig.dpi' : [100, validate_float], # DPI
'savefig.facecolor' : ['w', validate_color], # facecolor; white
'savefig.edgecolor' : ['w', validate_color], # edgecolor; white
'savefig.orientation' : ['portrait', validate_orientation], # orientation of the saved figure
'cairo.format' : ['png', validate_cairo_format],
'tk.window_focus' : [False, validate_bool], # Maintain shell focus for TkAgg
'tk.pythoninspect' : [False, validate_bool], # Set PYTHONINSPECT
'ps.papersize' : ['letter', validate_ps_papersize], # Set the papersize/type
'ps.useafm' : [False, validate_bool], # use AFM fonts; results in smaller PostScript files
'ps.usedistiller' : [False, validate_ps_distiller], # use ghostscript or xpdf to distill ps output
'ps.distiller.res' : [6000, validate_int], # dpi
'ps.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'pdf.compression' : [6, validate_int], # compression level from 0 to 9; 0 to disable
'pdf.inheritcolor' : [False, validate_bool], # ignore any color-setting commands from the frontend
'pdf.use14corefonts' : [False, validate_bool], # use only the 14 PDF core fonts
# embedded in every PDF viewing application
'pdf.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'svg.image_inline' : [True, validate_bool], # write raster image data directly into the svg file
'svg.image_noscale' : [False, validate_bool], # suppress scaling of raster data embedded in SVG
'svg.embed_char_paths' : [True, validate_bool], # True to save all characters as paths in the SVG
'docstring.hardcopy' : [False, validate_bool], # set this when you want to generate hardcopy docstring
'plugins.directory' : ['.matplotlib_plugins', str], # where the plugin directory is located
'path.simplify' : [False, validate_bool],
'agg.path.chunksize' : [0, validate_int] # 0 to disable chunking;
# recommend about 20000 to
# enable. Experimental.
}
if __name__ == '__main__':
rc = defaultParams
rc['datapath'][0] = '/'
for key in rc:
if not rc[key][1](rc[key][0]) == rc[key][0]:
print "%s: %s != %s"%(key, rc[key][1](rc[key][0]), rc[key][0])
| gpl-3.0 |
aneic/aneic-scripts | 130219_01_mfm_sigma_1.0_a_1.001_1_cv_5/analysis_mfm_cv.py | 1 | 5981 | import pandas as pd
import numpy as np
import pickle
import gzip
from glob import glob
from collections import defaultdict
import sys
sys.path.append('./aneic-core/src')
import aneic.mfm as mfm
# oracle grouping as specified by amy
oracle = \
[['BM118218_C20', 'BM118250_C19', 'BM118252_C18', 'BM118219_C16',
'BM118251_C17', 'BM118158_C15', 'BM118159_C12'],
['BM8015', 'BM8007', '59.107.107_Met'],
['BM9760', 'BM7995', 'BM10447', '65.924Bost'],
['BM126667_S176', 'BM118234_S172', 'BM118233_S173', 'BM126666_S175',
'BM118238_S174', 'BM126963_S192', 'BM118237_S193', 'BM118198_S197',
'BM118228_U6_plLXXI', 'BM118217_T5', 'BM118236_S212_Fig1full',
'BM118236_S212_Fig2frg', 'BM1 18230_S181', 'BM118193_S198',
'BM118203_S190', 'BM118202_S194', 'BM118181_S182', 'BM118229_U7',
'BM118231_S213_fig1', 'BM118197_S207_fig1', 'BM118197_S207_fig2',
'BM126675_S206_fig1Dark', 'BM126675_S206_fig2Light',
'BM126676_S209a-b']
]
# run options
opts = {
# run series
'series' : '130219_01',
# prefix for saving file
'outdir' : 'results',
# dataset to load from
'data' : './aneic-core/data/levantine_corpus_gansell.csv',
# any strings to match to empty field
'na_values' : ['', ' '],
}
if __name__ == '__main__':
# load data
data = pd.read_csv('./%s' %(opts['data']), index_col=0,
na_values=opts['na_values'])
site = mfm.convert_indicator(pd.DataFrame(data['Site'], columns=['Site']))
data = data.drop(['Site'], axis=1)
# split real and cat feats
cat = mfm.convert_indicator(data[data.columns[data.dtypes == np.object]])
real = data[data.columns[data.dtypes == np.float64]]
# number of examples
N = data.shape[0]
# run results
best = defaultdict(lambda: defaultdict(lambda: {}))
site_counts = defaultdict(lambda: defaultdict(lambda: {}))
L = defaultdict(lambda: defaultdict(lambda: 0.0))
Lh = defaultdict(lambda: defaultdict(lambda: 0.0))
l = defaultdict(lambda: defaultdict(lambda: 0.0))
lh = defaultdict(lambda: defaultdict(lambda: 0.0))
kl_g = defaultdict(lambda: defaultdict(lambda: 0.0))
d_g = defaultdict(lambda: defaultdict(lambda: 0.0))
e_g = defaultdict(lambda: defaultdict(lambda: 0.0))
for f in glob('results/*best*.pk.gz'):
K = np.int(f.split('_')[2][1:])
a = np.float(f.split('_')[3][1:])
fold = np.int(f.split('_')[4][4:])
best[a][K][fold] = pickle.load(gzip.open(f))
L[a][K] += best[a][K][fold]['L'][-1]
Lh[a][K] += best[a][K][fold]['L_test']
# log joint of held out data
u = best[a][K][fold]['u']
theta = best[a][K][fold]['theta']
real = best[a][K][fold]['real']
cat = best[a][K][fold]['cat']
l[a][K] += best[a][K][fold]['L'][-1] \
- mfm._log_pq(theta['mu'], theta['l'], theta['rho'], theta['pi'],
u['a'], u['b']).sum()
lh[a][K] += best[a][K][fold]['L_test'] \
- mfm._log_pq(theta['mu'], theta['l'], theta['rho'], theta['pi'],
u['a'], u['b']).sum()
# agreement with oracle
gamma = best[a][K][fold]['gamma']
flat = lambda iterable: [i for it in iterable for i in it]
o_index = pd.Index([o for o in flat(oracle) if o in gamma.index])
o_z = np.array([z for z,o_clust in enumerate(oracle) for o in o_clust if o in gamma.index])
o_gamma = pd.DataFrame((o_z[:,None]==np.arange(len(oracle))[None,:]).astype('i'), index=o_index)
kl_g[a][K] += mfm.kl_gamma(gamma, o_gamma, eps=1)
d_g[a][K] += mfm.d_gamma(gamma, o_gamma)
e_g[a][K] += mfm.err_gamma(gamma, o_gamma)
# correlation of sites with clustering
site_counts[a][K][fold] = pd.concat([(gamma[k].T * site.T).sum(1) for k in gamma], axis=1)
c_NA = defaultdict(lambda: defaultdict(lambda: 0.0))
c_AK = defaultdict(lambda: defaultdict(lambda: 0.0))
c_KN = defaultdict(lambda: defaultdict(lambda: 0.0))
for a in site_counts:
for k in site_counts[a]:
S = site.shape[1]
s_counts = np.zeros((S,S,2))
for f in site_counts[a][k]:
# s_c[s,k] = number of observations at site s in state k
s_c = np.array(site_counts[a][k][f])
# p(z=k | l=s)
pk_s = mfm.norm(s_c + 1e-6, 1).T
# p(z1=k,z2=l | l1=s, l2=t)
pkl_st = pk_s[:,None,:,None] * pk_s[None,:,None,:]
# pi_st = sum_k p(z1=k, z2=k | l1=s, l2=t)
pi_st = np.sum([pkl_st[l,l,:,:] for l in range(s_c.shape[1])], 0)
# pi_st[:,:,1] = 1 - sum_k p(z1=k, z2=k | l1=s, l2=t)
s_counts[:,:,0] += pi_st
s_counts[:,:,1] += 1 - pi_st
s_corr = pd.DataFrame(mfm.norm(s_counts, 2)[:,:,0],
index=site.columns.levels[1], columns=site.columns.levels[1])
c_AK[a][k] = s_corr.ix['ArslanTash', 'Khorsabad']
c_KN[a][k] = s_corr.ix['Khorsabad', 'Nimrud']
c_NA[a][k] = s_corr.ix['Nimrud', 'ArslanTash']
# L = pd.DataFrame(L)
# Lh = pd.DataFrame(Lh)
# lh = pd.DataFrame(lh)
# l = pd.DataFrame(l)
# kl_g = pd.DataFrame(kl_g)
# for var in ['d_g', 'e_g', 'c_AK', 'c_KN', 'c_NA']:
# locals()[var] = pd.DataFrame(locals()[var]) / 5
# L = pd.concat([pd.DataFrame(L[a], index=L[a], columns=[a])
# for a in L], axis=1).reindex(columns=sorted(L))
# Lh = pd.concat([pd.DataFrame(Lh[s], index=Lh[a], columns=[a])
# for a in Lh], axis=1).reindex(columns=sorted(Lh))
# a = pd.concat([pd.DataFrame([best[a][k][0]['u']['a']
# for k in best[s]], index=best[a], columns=[a])
# for a in best], axis=1).reindex(columns=sorted(best))
| mit |
ntucllab/libact | examples/plot.py | 1 | 3390 | #!/usr/bin/env python3
"""
This script helps users quickly understand how to use
libact by walking through a simple active learning task with clear
descriptions.
"""
import copy
import os
import numpy as np
import matplotlib.pyplot as plt
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
# libact classes
from libact.base.dataset import Dataset, import_libsvm_sparse
from libact.models import LogisticRegression
from libact.query_strategies import RandomSampling, UncertaintySampling
from libact.labelers import IdealLabeler
def run(trn_ds, tst_ds, lbr, model, qs, quota):
E_in, E_out = [], []
for _ in range(quota):
# Standard usage of libact objects
ask_id = qs.make_query()
lb = lbr.label(trn_ds.data[ask_id][0])
trn_ds.update(ask_id, lb)
model.train(trn_ds)
E_in = np.append(E_in, 1 - model.score(trn_ds))
E_out = np.append(E_out, 1 - model.score(tst_ds))
return E_in, E_out
def split_train_test(dataset_filepath, test_size, n_labeled):
X, y = import_libsvm_sparse(dataset_filepath).format_sklearn()
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=test_size)
trn_ds = Dataset(X_train, np.concatenate(
[y_train[:n_labeled], [None] * (len(y_train) - n_labeled)]))
tst_ds = Dataset(X_test, y_test)
fully_labeled_trn_ds = Dataset(X_train, y_train)
return trn_ds, tst_ds, y_train, fully_labeled_trn_ds
def main():
# Specify the parameters here:
# path to your binary classification dataset
dataset_filepath = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'diabetes.txt')
test_size = 0.33 # the percentage of samples in the dataset that will be
# randomly selected and assigned to the test set
n_labeled = 10 # number of samples that are initially labeled
# Load dataset
trn_ds, tst_ds, y_train, fully_labeled_trn_ds = \
split_train_test(dataset_filepath, test_size, n_labeled)
trn_ds2 = copy.deepcopy(trn_ds)
lbr = IdealLabeler(fully_labeled_trn_ds)
quota = len(y_train) - n_labeled # number of samples to query
# Comparing UncertaintySampling strategy with RandomSampling.
# model is the base learner, e.g. LogisticRegression, SVM ... etc.
qs = UncertaintySampling(trn_ds, method='lc', model=LogisticRegression())
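# Note (illustrative): 'lc' stands for least confident; each query typically
# picks the unlabeled sample whose most probable predicted label has the lowest
# posterior probability, i.e. roughly  x* = argmax_x (1 - max_y P(y | x)).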
model = LogisticRegression()
E_in_1, E_out_1 = run(trn_ds, tst_ds, lbr, model, qs, quota)
qs2 = RandomSampling(trn_ds2)
model = LogisticRegression()
E_in_2, E_out_2 = run(trn_ds2, tst_ds, lbr, model, qs2, quota)
# Plot the learning curve of UncertaintySampling to RandomSampling
# The x-axis is the number of queries, and the y-axis is the corresponding
# error rate.
query_num = np.arange(1, quota + 1)
plt.plot(query_num, E_in_1, 'b', label='qs Ein')
plt.plot(query_num, E_in_2, 'r', label='random Ein')
plt.plot(query_num, E_out_1, 'g', label='qs Eout')
plt.plot(query_num, E_out_2, 'k', label='random Eout')
plt.xlabel('Number of Queries')
plt.ylabel('Error')
plt.title('Experiment Result')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
fancybox=True, shadow=True, ncol=5)
plt.show()
if __name__ == '__main__':
main()
| bsd-2-clause |
AlexRobson/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
osvaldshpengler/BuildingMachineLearningSystemsWithPython | ch06/03_clean.py | 22 | 5972 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to improve the classifier by cleaning the tweets a bit
#
import time
start_time = time.time()
import re
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.pipeline import Pipeline
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from utils import log_false_positives
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from utils import load_sent_word_net
sent_word_net = load_sent_word_net()
phase = "03"
emo_repl = {
# positive emoticons
"<3": " good ",
":d": " good ", # :D in lower case
":dd": " good ", # :DD in lower case
"8)": " good ",
":-)": " good ",
":)": " good ",
";)": " good ",
"(-:": " good ",
"(:": " good ",
# negative emoticons:
":/": " bad ",
":>": " sad ",
":')": " sad ",
":-(": " bad ",
":(": " bad ",
":S": " bad ",
":-S": " bad ",
}
emo_repl_order = [k for (k_len, k) in reversed(
sorted([(len(k), k) for k in list(emo_repl.keys())]))]
re_repl = {
r"\br\b": "are",
r"\bu\b": "you",
r"\bhaha\b": "ha",
r"\bhahaha\b": "ha",
r"\bdon't\b": "do not",
r"\bdoesn't\b": "does not",
r"\bdidn't\b": "did not",
r"\bhasn't\b": "has not",
r"\bhaven't\b": "have not",
r"\bhadn't\b": "had not",
r"\bwon't\b": "will not",
r"\bwouldn't\b": "would not",
r"\bcan't\b": "can not",
r"\bcannot\b": "can not",
}
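# Illustrative effect of the two replacement tables above on a sample tweet
# (derived by tracing the preprocessor defined below):
#   "I don't like it :("  ->  "i do not like it  bad "
# (lower-casing first, then emoticon replacement, then the regex pass)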
def create_ngram_model(params=None):
def preprocessor(tweet):
global emoticons_replaced
tweet = tweet.lower()
for k in emo_repl_order:
tweet = tweet.replace(k, emo_repl[k])
for r, repl in re_repl.items():
tweet = re.sub(r, repl, tweet)
return tweet
tfidf_ngrams = TfidfVectorizer(preprocessor=preprocessor,
analyzer="word")
clf = MultinomialNB()
pipeline = Pipeline([('tfidf', tfidf_ngrams), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
clfs = [] # just to later get the median
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
log_false_positives(clfs[median], X_test, y_test, name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
def get_best_model():
best_params = dict(tfidf__ngram_range=(1, 2),
tfidf__min_df=1,
tfidf__stop_words=None,
tfidf__smooth_idf=False,
tfidf__use_idf=False,
tfidf__sublinear_tf=True,
tfidf__binary=False,
clf__alpha=0.01,
)
best_clf = create_ngram_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_union_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos+neg vs rest", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print("time spent:", time.time() - start_time)
| mit |
jorge2703/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
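# Illustrative alternative (assumption: standard scikit-learn semantics):
# setting n_jobs = -1 here would use all available CPU cores, since the value
# is passed straight to ExtraTreesClassifier below.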
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
             (2  0)
k(x, y) = x  (    ) y.T
             (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
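# Note (illustrative): with M as defined above this is just a weighted linear
# kernel, k(x, y) = 2*x1*y1 + x2*y2, so the first feature counts twice as much
# as the second when measuring similarity.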
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
SciTools/iris | lib/iris/tests/unit/plot/test_outline.py | 5 | 2482 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.plot.outline` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.outline(self.cube, coords=("bar", "str_coord"))
self.assertBoundsTickLabels("yaxis")
def test_xaxis_labels(self):
iplt.outline(self.cube, coords=("str_coord", "bar"))
self.assertBoundsTickLabels("xaxis")
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, 3)
iplt.outline(self.cube, coords=("str_coord", "bar"), axes=ax)
plt.close(fig)
self.assertPointsTickLabels("xaxis", ax)
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim(0, 3)
iplt.outline(self.cube, axes=ax, coords=("bar", "str_coord"))
plt.close(fig)
self.assertPointsTickLabels("yaxis", ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.outline, self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=True)
coord = self.cube.coord("foo")
self.foo = coord.contiguous_bounds()
self.foo_index = np.arange(coord.points.size + 1)
coord = self.cube.coord("bar")
self.bar = coord.contiguous_bounds()
self.bar_index = np.arange(coord.points.size + 1)
self.data = self.cube.data
self.dataT = self.data.T
self.mpl_patch = self.patch("matplotlib.pyplot.pcolormesh")
self.draw_func = iplt.outline
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
etkirsch/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
maxlikely/scikit-learn | examples/svm/plot_svm_kernels.py | 4 | 1930 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD
import numpy as np
import pylab as pl
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
pl.figure(fignum, figsize=(4, 3))
pl.clf()
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
pl.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=pl.cm.Paired)
pl.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
pl.figure(fignum, figsize=(4, 3))
pl.pcolormesh(XX, YY, Z > 0, cmap=pl.cm.Paired)
pl.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
fignum = fignum + 1
pl.show()
| bsd-3-clause |
newville/scikit-image | doc/examples/plot_edge_filter.py | 14 | 2258 | """
==============
Edge operators
==============
Edge operators are used in image processing within edge detection algorithms.
They are discrete differentiation operators, computing an approximation of the
gradient of the image intensity function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.data import camera
from skimage.filters import roberts, sobel, scharr
image = camera()
edge_roberts = roberts(image)
edge_sobel = sobel(image)
fig, (ax0, ax1) = plt.subplots(ncols=2)
ax0.imshow(edge_roberts, cmap=plt.cm.gray)
ax0.set_title('Roberts Edge Detection')
ax0.axis('off')
ax1.imshow(edge_sobel, cmap=plt.cm.gray)
ax1.set_title('Sobel Edge Detection')
ax1.axis('off')
plt.tight_layout()
"""
.. image:: PLOT2RST.current_figure
Different operators compute different finite-difference approximations of the
gradient. For example, the Scharr filter results in less rotational
variance than other filters such as the Sobel filter [1]_ [2]_. The difference
between the two filters is illustrated below on an image that is the
discretization of a rotation-invariant continuous function. The discrepancy
between the two filters is stronger for regions of the image where the
direction of the gradient is close to diagonal, and for regions with high
spatial frequencies.
.. [1] http://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators
.. [2] B. Jaehne, H. Scharr, and S. Koerkel. Principles of filter design. In
Handbook of Computer Vision and Applications. Academic Press, 1999.
"""
x, y = np.ogrid[:100, :100]
# Rotation-invariant image with different spatial frequencies
img = np.exp(1j * np.hypot(x, y)**1.3 / 20.).real
edge_sobel = sobel(img)
edge_scharr = scharr(img)
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2)
ax0.imshow(edge_sobel, cmap=plt.cm.gray)
ax0.set_title('Sobel Edge Detection')
ax0.axis('off')
ax1.imshow(edge_scharr, cmap=plt.cm.gray)
ax1.set_title('Scharr Edge Detection')
ax1.axis('off')
ax2.imshow(img, cmap=plt.cm.gray)
ax2.set_title('Original image')
ax2.axis('off')
ax3.imshow(edge_scharr - edge_sobel, cmap=plt.cm.jet)
ax3.set_title('difference (Scharr - Sobel)')
ax3.axis('off')
plt.tight_layout()
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| bsd-3-clause |
ashhher3/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
calhewitt/lucid-utils | lucid_utils/frameplot.py | 2 | 1871 | import matplotlib as mpl
import os
if not "DISPLAY" in os.environ: # Make MPL Work if no display is available
mpl.use('Agg')
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_agg import FigureCanvasAgg
def get_image(frame, colourmode = "BW", normalise=False):
fig = plt.figure(frameon=False, figsize=(256,256), dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
cmap = cm.hot
cmap.set_under("#82bcff")
vm = np.max(frame) if (np.count_nonzero(frame) > 0) else 2
ax.imshow(frame, vmin = 1, vmax=vm, cmap = cmap, interpolation='none')
canvas = plt.get_current_fig_manager().canvas
agg = canvas.switch_backends(FigureCanvasAgg)
agg.draw()
s = agg.tostring_rgb()
l, b, w, h = agg.figure.bbox.bounds
w, h = int(w), int(h)
X = np.fromstring(s, np.uint8)
X.shape = h, w, 3
plt.close()
try:
im = Image.fromstring("RGB", (w, h), s)
except Exception:
im = Image.frombytes("RGB", (w, h), s)
return im
def show_frame(frame):
if not "DISPLAY" in os.environ:
raise Exception("No display available")
fig, ax = plt.subplots()
cmap = cm.hot
cmap.set_under("#82bcff")
vm = np.max(frame) if (np.count_nonzero(frame) > 0) else 2
cax = ax.imshow(frame, vmin = 1, vmax=vm, cmap = cmap, interpolation='none')
fig.colorbar(cax)
plt.show()
def fig_frame(frame):
fig, ax = plt.subplots()
cmap = cm.hot
cmap.set_under("#82bcff")
vm = np.max(frame) if (np.count_nonzero(frame) > 0) else 2
cax = ax.imshow(frame, vmin = 1, vmax=vm, cmap = cmap, interpolation='none')
fig.colorbar(cax)
return fig
def show_blob(blob):
xs, ys = zip(*blob)
sizex, sizey = max(xs) - min(xs), max(ys) - min(ys)
toshow = np.zeros((sizex+3,sizey+3))
for i in range(len(xs)):
toshow[1 + xs[i] - min(xs)][1 + ys[i] - min(ys)] = 1
show_frame(toshow)
| mit |
arboreus/exercises | nn_test_part4.py | 1 | 10249 | #%% Clear variable list
def clearall():
"""clear all globals"""
for uniquevar in [var for var in globals().copy() if var[0] != "_" and var != 'clearall']:
del globals()[uniquevar]
clearall()
#%%
# Python imports
import numpy as np # Matrix and vector computation package
import matplotlib.pyplot as plt # Plotting library
from matplotlib.colors import colorConverter, ListedColormap # some plotting functions
from mpl_toolkits.mplot3d import Axes3D # 3D plots
from matplotlib import cm # Colormaps
import seaborn as sns
# Allow matplotlib to plot inside this notebook
#%matplotlib inline
# Set the seed of the numpy random number generator so that the tutorial is reproducable
np.random.seed(seed=1)
#%%
# Define and generate the samples
nb_of_samples_per_class = 50 # The number of samples in each class
# Generate blue samples
blue_mean = [0,0] # The mean of the blue class
blue_std_dev = 0.3 # standard deviation of blue class
x_blue = np.random.randn(nb_of_samples_per_class, 2) * blue_std_dev + blue_mean
# Generate red samples as circle around blue samples
red_radius_mean = 1.3 # mean of the radius
red_radius_std_dev = 0.2 # standard deviation of the radius
red_rand_radius = np.random.randn(nb_of_samples_per_class) * red_radius_std_dev + red_radius_mean
red_rand_angle = 2 * np.pi * np.random.rand(nb_of_samples_per_class);
x_red = np.asmatrix([red_rand_radius * np.cos(red_rand_angle),
red_rand_radius * np.sin(red_rand_angle)]).T
# Define target vectors for blue and red
t_blue_vector = np.asarray([1, 0])
t_red_vector = np.asarray([0, 1])
# Define the full target matrix for each class
t_blue = np.tile(t_blue_vector, (nb_of_samples_per_class, 1))
t_red = np.tile(t_red_vector, (nb_of_samples_per_class, 1))
# Merge samples in set of input variables x, and corresponding set of
# output variables t
X = np.vstack((x_blue, x_red))
T = np.vstack((t_blue, t_red))
# Plot both classes on the x1, x2 plane
plt.plot(x_red[:,0], x_red[:,1], 'ro', label='class red')
plt.plot(x_blue[:,0], x_blue[:,1], 'bo', label='class blue')
plt.grid()
plt.legend(loc=2)
plt.xlabel('$x_1$', fontsize=15)
plt.ylabel('$x_2$', fontsize=15)
plt.axis([-2, 2, -2, 2])
plt.title('red vs blue classes in the input space')
plt.show()
#%%
# Define the logistic function
def logistic(z): return 1 / (1 + np.exp(-z))
# Define the softmax function
def softmax(z): return np.exp(z) / np.sum(np.exp(z), axis=1)
# Function to compute the hidden activations
def hidden_activations(X, Wh, bh):
return logistic(X * Wh + bh)
# Define output layer feedforward
def output_activations(H, Wo, bo):
return softmax(H * Wo + bo)
# Define the neural network function
def nn(X, Wh, bh, Wo, bo):
return output_activations(hidden_activations(X, Wh, bh), Wo, bo)
# Define the neural network prediction function that only returns
# 1 or 0 depending on the predicted class
def nn_predict(X, Wh, bh, Wo, bo):
return np.around(nn(X, Wh, bh, Wo, bo))
# Define the cost function
def cost(Y, T):
return - np.multiply(T, np.log(Y)).sum()
# Define the error function at the output
def error_output(Y, T):
return Y - T
# Define the gradient function for the weight parameters at the output layer
def gradient_weight_out(H, Eo):
return H.T * Eo
# Define the gradient function for the bias parameters at the output layer
def gradient_bias_out(Eo):
return np.sum(Eo, axis=0)
# Define the error function at the hidden layer
def error_hidden(H, Wo, Eo):
# H * (1-H) * (E . Wo^T)
return np.multiply(np.multiply(H,(1 - H)), Eo.dot(Wo.T))
# Define the gradient function for the weight parameters at the hidden layer
def gradient_weight_hidden(X, Eh):
return X.T * Eh
# Define the gradient function for the bias parameters at the hidden layer
def gradient_bias_hidden(Eh):
return np.sum(Eh, axis=0)
#%%
# Initialize weights and biases
init_var = 1
# Initialize hidden layer parameters
bh = np.random.randn(1, 3) * init_var
Wh = np.random.randn(2, 3) * init_var
# Initialize output layer parameters
bo = np.random.randn(1, 2) * init_var
Wo = np.random.randn(3, 2) * init_var
# Compute the gradients by backpropagation
# Compute the activations of the layers
H = hidden_activations(X, Wh, bh)
Y = output_activations(H, Wo, bo)
# Compute the gradients of the output layer
Eo = error_output(Y, T)
JWo = gradient_weight_out(H, Eo)
Jbo = gradient_bias_out(Eo)
# Compute the gradients of the hidden layer
Eh = error_hidden(H, Wo, Eo)
JWh = gradient_weight_hidden(X, Eh)
Jbh = gradient_bias_hidden(Eh)
# Combine all parameter matrices in a list
params = [Wh, bh, Wo, bo]
# Combine all parameter gradients in a list
grad_params = [JWh, Jbh, JWo, Jbo]
# Set the small change to compute the numerical gradient
eps = 0.0001
# Check each parameter matrix
for p_idx in range(len(params)):
# Check each parameter in each parameter matrix
for row in range(params[p_idx].shape[0]):
for col in range(params[p_idx].shape[1]):
# Copy the parameter matrix and change the current parameter slightly
p_matrix_min = params[p_idx].copy()
p_matrix_min[row,col] -= eps
p_matrix_plus = params[p_idx].copy()
p_matrix_plus[row,col] += eps
# Copy the parameter list, and change the updated parameter matrix
params_min = params[:]
params_min[p_idx] = p_matrix_min
params_plus = params[:]
params_plus[p_idx] = p_matrix_plus
# Compute the numerical gradient
grad_num = (cost(nn(X, *params_plus), T)-cost(nn(X, *params_min), T))/(2*eps)
print('backprop gradient: {:.6f} ; numerical gradient {:.6f}'.format(grad_params[p_idx][row,col], grad_num))
# Raise error if the numerical gradient is not close to the backprop gradient
if not np.isclose(grad_num, grad_params[p_idx][row,col]):
raise ValueError('Numerical gradient is not close to the backpropagation gradient!')
#%%
# Define the update function to update the network parameters over 1 iteration
def backprop_gradients(X, T, Wh, bh, Wo, bo):
# Compute the output of the network
# Compute the activations of the layers
H = hidden_activations(X, Wh, bh)
Y = output_activations(H, Wo, bo)
# Compute the gradients of the output layer
Eo = error_output(Y, T)
JWo = gradient_weight_out(H, Eo)
Jbo = gradient_bias_out(Eo)
# Compute the gradients of the hidden layer
Eh = error_hidden(H, Wo, Eo)
JWh = gradient_weight_hidden(X, Eh)
Jbh = gradient_bias_hidden(Eh)
return [JWh, Jbh, JWo, Jbo]
def update_velocity(X, T, ls_of_params, Vs, momentum_term, learning_rate):
# ls_of_params = [Wh, bh, Wo, bo]
# Js = [JWh, Jbh, JWo, Jbo]
Js = backprop_gradients(X, T, *ls_of_params)
return [momentum_term * V - learning_rate * J for V,J in zip(Vs, Js)]
def update_params(ls_of_params, Vs):
# ls_of_params = [Wh, bh, Wo, bo]
# Vs = [VWh, Vbh, VWo, Vbo]
return [P + V for P,V in zip(ls_of_params, Vs)]
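# Illustrative sketch (added for clarity, not part of the original tutorial): one
# momentum step on a single parameter matrix with a made-up gradient, mirroring
# update_velocity and update_params above. All numbers are assumptions.
def _momentum_step_example(momentum_term=0.9, learning_rate=0.02):
    P = np.zeros((2, 2))           # parameter matrix
    V = np.zeros_like(P)           # velocity, initially zero
    J = np.ones_like(P)            # pretend gradient of the cost w.r.t. P
    V = momentum_term * V - learning_rate * J  # same rule as update_velocity
    P = P + V                                  # same rule as update_params
    return P, V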
# Run backpropagation
# Initialize weights and biases
init_var = 0.1
# Initialize hidden layer parameters
bh = np.random.randn(1, 3) * init_var
Wh = np.random.randn(2, 3) * init_var
# Initialize output layer parameters
bo = np.random.randn(1, 2) * init_var
Wo = np.random.randn(3, 2) * init_var
# Parameters are already initialized randomly with the gradient checking
# Set the learning rate
learning_rate = 0.02
momentum_term = 0.9
# define the velocities Vs = [VWh, Vbh, VWo, Vbo]
Vs = [np.zeros_like(M) for M in [Wh, bh, Wo, bo]]
# Start the gradient descent updates and plot the iterations
nb_of_iterations = 300 # number of gradient descent updates
lr_update = learning_rate / nb_of_iterations # learning rate update rule
ls_costs = [] # list of cost over the iterations
for i in range(nb_of_iterations):
# Add the current cost to the cost list
current_cost = cost(nn(X, Wh, bh, Wo, bo), T)
ls_costs.append(current_cost)
Vs = update_velocity(X, T, [Wh, bh, Wo, bo], Vs, momentum_term, learning_rate)
Wh, bh, Wo, bo = update_params([Wh, bh, Wo, bo], Vs)
# Add the final cost to the cost list
ls_costs.append(cost(nn(X, Wh, bh, Wo, bo), T))
# Plot the cost over the iterations
plt.plot(ls_costs, 'b-')
plt.xlabel('iteration')
plt.ylabel('$\\xi$', fontsize=15)
plt.title('Decrease of cost over backprop iteration')
plt.grid()
plt.show()
#%%
# Plot the resulting decision boundary
# Generate a grid over the input space to plot the color of the
# classification at that grid point
nb_of_xs = 200
xs1 = np.linspace(-2, 2, num=nb_of_xs)
xs2 = np.linspace(-2, 2, num=nb_of_xs)
xx, yy = np.meshgrid(xs1, xs2) # create the grid
# Initialize and fill the classification plane
classification_plane = np.zeros((nb_of_xs, nb_of_xs))
for i in range(nb_of_xs):
for j in range(nb_of_xs):
pred = nn_predict(np.asmatrix([xx[i,j], yy[i,j]]), Wh, bh, Wo, bo)
classification_plane[i,j] = pred[0,0]
# Create a color map to show the classification colors of each grid point
cmap = ListedColormap([
colorConverter.to_rgba('r', alpha=0.30),
colorConverter.to_rgba('b', alpha=0.30)])
# Plot the classification plane with decision boundary and input samples
plt.contourf(xx, yy, classification_plane, cmap=cmap)
# Plot both classes on the x1, x2 plane
plt.plot(x_red[:,0], x_red[:,1], 'ro', label='class red')
plt.plot(x_blue[:,0], x_blue[:,1], 'bo', label='class blue')
plt.grid()
plt.legend(loc=2)
plt.xlabel('$x_1$', fontsize=15)
plt.ylabel('$x_2$', fontsize=15)
plt.axis([-2, 2, -2, 2])
plt.title('red vs blue classification boundary')
plt.show()
#%%
# Define the projections of the blue and red classes
H_blue = hidden_activations(x_blue, Wh, bh)
H_red = hidden_activations(x_red, Wh, bh)
# Plot the error surface
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(np.ravel(H_blue[:,0]), np.ravel(H_blue[:,1]), np.ravel(H_blue[:,2]), 'bo')
ax.plot(np.ravel(H_red[:,0]), np.ravel(H_red[:,1]), np.ravel(H_red[:,2]), 'ro')
ax.set_xlabel('$h_1$', fontsize=15)
ax.set_ylabel('$h_2$', fontsize=15)
ax.set_zlabel('$h_3$', fontsize=15)
ax.view_init(elev=10, azim=-40)
plt.title('Projection of the input X onto the hidden layer H')
plt.grid()
plt.show() | mit |
phdowling/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
alekz112/statsmodels | statsmodels/datasets/grunfeld/data.py | 24 | 2794 | """Grunfeld (1950) Investment Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """This is the Grunfeld (1950) Investment Data.
The source for the data was the original 11-firm data set from Grunfeld's Ph.D.
thesis recreated by Kleiber and Zeileis (2008) "The Grunfeld Data at 50".
The data can be found here.
http://statmath.wu-wien.ac.at/~zeileis/grunfeld/
For a note on the many versions of the Grunfeld data circulating see:
http://www.stanford.edu/~clint/bench/grunfeld.htm
"""
DESCRSHORT = """Grunfeld (1950) Investment Data for 11 U.S. Firms."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of observations - 220 (20 years for 11 firms)
Number of variables - 5
Variables name definitions::
invest - Gross investment in 1947 dollars
value - Market value as of Dec. 31 in 1947 dollars
capital - Stock of plant and equipment in 1947 dollars
firm - General Motors, US Steel, General Electric, Chrysler,
Atlantic Refining, IBM, Union Oil, Westinghouse, Goodyear,
Diamond Match, American Steel
year - 1935 - 1954
Note that raw_data has firm expanded to dummy variables, since it is a
string categorical variable.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Loads the Grunfeld data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
raw_data has the firm variable expanded to dummy variables for each
firm (i.e., there is no reference dummy)
"""
from statsmodels.tools import categorical
data = _get_data()
raw_data = categorical(data, col='firm', drop=True)
ds = du.process_recarray(data, endog_idx=0, stack=False)
ds.raw_data = raw_data
return ds
def load_pandas():
"""
Loads the Grunfeld data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
raw_data has the firm variable expanded to dummy variables for each
firm (i.e., there is no reference dummy)
"""
from pandas import DataFrame
from statsmodels.tools import categorical
data = _get_data()
raw_data = categorical(data, col='firm', drop=True)
ds = du.process_recarray_pandas(data, endog_idx=0)
ds.raw_data = DataFrame(raw_data)
return ds
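# Example usage (added for illustration; not part of the original module):
#   >>> dataset = load_pandas()
#   >>> dataset.endog[:3]        # gross investment for the first three observations
#   >>> dataset.raw_data.head()  # firm expanded to dummy variables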
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/grunfeld.csv','rb'), delimiter=",",
names=True, dtype="f8,f8,f8,a17,f8")
return data
| bsd-3-clause |
OspreyX/trading-with-python | lib/backtest.py | 74 | 7381 | #-------------------------------------------------------------------------------
# Name: backtest
# Purpose: perform routine backtesting tasks.
# This module should be usable as a stand-alone library outside of the TWP package.
#
# Author: Jev Kuznetsov
#
# Created: 03/07/2014
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
def tradeBracket(price,entryBar,upper=None, lower=None, timeout=None):
'''
trade a bracket on price series, return price delta and exit bar #
Input
------
price : numpy array of price values
entryBar: entry bar number, *determines entry price*
upper : high stop
lower : low stop
timeout : max number of periods to hold
Returns exit price and number of bars held
'''
assert isinstance(price, np.ndarray) , 'price must be a numpy array'
# create list of exit indices and add max trade duration. Exits are relative to entry bar
if timeout: # set trade length to timeout or series length
exits = [min(timeout,len(price)-entryBar-1)]
else:
exits = [len(price)-entryBar-1]
p = price[entryBar:entryBar+exits[0]+1] # subseries of price
# extend exits list with conditional exits
# check upper bracket
if upper:
assert upper>p[0] , 'Upper bracket must be higher than entry price '
idx = np.where(p>upper)[0] # find where price is higher than the upper bracket
if idx.any():
exits.append(idx[0]) # append first occurrence
# same for lower bracket
if lower:
assert lower<p[0] , 'Lower bracket must be lower than entry price '
idx = np.where(p<lower)[0]
if idx.any():
exits.append(idx[0])
exitBar = min(exits) # choose first exit
return p[exitBar], exitBar
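# Illustrative sketch (added for clarity, not part of the original module): a minimal
# call of tradeBracket on a synthetic price path. All numbers are assumptions chosen
# only to show the bracket logic: the trade enters at bar 1 and exits at the first bar
# above `upper`, below `lower`, or after `timeout` bars, whichever comes first.
def _example_trade_bracket():
    prices = np.array([10.0, 10.2, 9.9, 10.6, 10.8, 9.5])
    exit_price, bars_held = tradeBracket(prices, entryBar=1, upper=10.7, lower=9.7, timeout=4)
    return exit_price, bars_held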
class Backtest(object):
"""
Backtest class, simple vectorized one. Works with pandas objects.
"""
def __init__(self,price, signal, signalType='capital',initialCash = 0, roundShares=True):
"""
Arguments:
*price* Series with instrument price.
*signal* Series with capital to invest (long+,short-) or number of shares.
*signalType* 'capital' to bet an amount of capital or 'shares' to trade a number of shares; 'capital' mode is default.
*initialCash* starting cash.
*roundShares* round off number of shares to integers
"""
#TODO: add auto rebalancing
# check for correct input
assert signalType in ['capital','shares'], "Wrong signal type provided, must be 'capital' or 'shares'"
#save internal settings to a dict
self.settings = {'signalType':signalType}
# first thing to do is to clean up the signal, removing nans and duplicate entries or exits
self.signal = signal.ffill().fillna(0)
# now find dates with a trade
tradeIdx = self.signal.diff().fillna(0) !=0 # days with trades are set to True
if signalType == 'shares':
self.trades = self.signal[tradeIdx] # selected rows where tradeDir changes value. trades are in Shares
elif signalType =='capital':
self.trades = (self.signal[tradeIdx]/price[tradeIdx])
if roundShares:
self.trades = self.trades.round()
# now create internal data structure
self.data = pd.DataFrame(index=price.index , columns = ['price','shares','value','cash','pnl'])
self.data['price'] = price
self.data['shares'] = self.trades.reindex(self.data.index).ffill().fillna(0)
self.data['value'] = self.data['shares'] * self.data['price']
delta = self.data['shares'].diff() # shares bought sold
self.data['cash'] = (-delta*self.data['price']).fillna(0).cumsum()+initialCash
self.data['pnl'] = self.data['cash']+self.data['value']-initialCash
@property
def sharpe(self):
''' return annualized sharpe ratio of the pnl '''
pnl = (self.data['pnl'].diff()).shift(-1)[self.data['shares']!=0] # use only days with position.
return sharpe(pnl) # need the diff here as sharpe works on daily returns.
@property
def pnl(self):
'''easy access to pnl data column '''
return self.data['pnl']
def plotTrades(self):
"""
visualise trades on the price chart
long entry : green triangle up
short entry : red triangle down
exit : black circle
"""
l = ['price']
p = self.data['price']
p.plot(style='x-')
# ---plot markers
# this works, but I'd rather use colored markers for each day in a position than entry-exit signals
# indices = {'g^': self.trades[self.trades > 0].index ,
# 'ko':self.trades[self.trades == 0].index,
# 'rv':self.trades[self.trades < 0].index}
#
#
# for style, idx in indices.iteritems():
# if len(idx) > 0:
# p[idx].plot(style=style)
# --- plot trades
#colored line for long positions
idx = (self.data['shares'] > 0) | (self.data['shares'] > 0).shift(1)
if idx.any():
p[idx].plot(style='go')
l.append('long')
#colored line for short positions
idx = (self.data['shares'] < 0) | (self.data['shares'] < 0).shift(1)
if idx.any():
p[idx].plot(style='ro')
l.append('short')
plt.xlim([p.index[0],p.index[-1]]) # show full axis
plt.legend(l,loc='best')
plt.title('trades')
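# Illustrative sketch (added for clarity, not part of the original library): a minimal
# Backtest on a synthetic pandas price series with a single long position that is
# entered and later closed. The dates, prices and capital figures are assumptions.
def _example_backtest():
    idx = pd.date_range('2014-01-01', periods=6)
    price = pd.Series([10.0, 11.0, 12.0, 11.0, 13.0, 14.0], index=idx)
    signal = pd.Series([0, 100, 100, 100, 0, 0], index=idx, dtype=float)  # capital to invest
    bt = Backtest(price, signal, signalType='capital', initialCash=100)
    return bt.data[['shares', 'cash', 'pnl']]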
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print '\r',self,
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/cluster/setup.py | 79 | 1855 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.pyx'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means_elkan',
sources=['_k_means_elkan.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means',
libraries=cblas_libs,
sources=['_k_means.pyx'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop(
'extra_compile_args', []),
**blas_info
)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
vicky2135/lucious | oscar/lib/python2.7/site-packages/IPython/testing/iptest.py | 7 | 16347 | # -*- coding: utf-8 -*-
"""IPython Test Suite Runner.
This module provides a main entry point to a user script to test IPython
itself from the command line. There are two ways of running this script:
1. With the syntax `iptest all`. This runs our entire test suite by
calling this script (with different arguments) recursively. This
causes modules and package to be tested in different processes, using nose
or trial where appropriate.
2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
the script simply calls nose, but with special command line flags and
plugins loaded.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import glob
from io import BytesIO
import os
import os.path as path
import sys
from threading import Thread, Lock, Event
import warnings
import nose.plugins.builtin
from nose.plugins.xunit import Xunit
from nose import SkipTest
from nose.core import TestProgram
from nose.plugins import Plugin
from nose.util import safe_str
from IPython import version_info
from IPython.utils.py3compat import bytes_to_str
from IPython.utils.importstring import import_item
from IPython.testing.plugin.ipdoctest import IPythonDoctest
from IPython.external.decorators import KnownFailure, knownfailureif
pjoin = path.join
# Enable printing all warnings raise by IPython's modules
warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*')
if sys.version_info > (3,0):
warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*')
warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')
warnings.filterwarnings('error', message='.*apply_wrapper.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*make_label_dec', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*decorated_dummy.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*skip_file_no_x11.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*onlyif_any_cmd_exists.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*disable_gui.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*ExceptionColors global is deprecated.*', category=DeprecationWarning, module='.*')
if version_info < (6,):
# nose.tools renames all things from `camelCase` to `snake_case`, which raises a
# warning with the test runner it also imports from the standard library (as of Dec 2015).
# Ignore it; let's revisit that in a couple of years for IPython 6.
warnings.filterwarnings('ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*')
# ------------------------------------------------------------------------------
# Monkeypatch Xunit to count known failures as skipped.
# ------------------------------------------------------------------------------
def monkeypatch_xunit():
try:
knownfailureif(True)(lambda: None)()
except Exception as e:
KnownFailureTest = type(e)
def addError(self, test, err, capt=None):
if issubclass(err[0], KnownFailureTest):
err = (SkipTest,) + err[1:]
return self.orig_addError(test, err, capt)
Xunit.orig_addError = Xunit.addError
Xunit.addError = addError
#-----------------------------------------------------------------------------
# Check which dependencies are installed and greater than minimum version.
#-----------------------------------------------------------------------------
def extract_version(mod):
return mod.__version__
def test_for(item, min_version=None, callback=extract_version):
"""Test to see if item is importable, and optionally check against a minimum
version.
If min_version is given, the default behavior is to check against the
`__version__` attribute of the item, but specifying `callback` allows you to
extract the value you are interested in. e.g::
In [1]: import sys
In [2]: from IPython.testing.iptest import test_for
In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
Out[3]: True
"""
try:
check = import_item(item)
except (ImportError, RuntimeError):
# GTK reports Runtime error if it can't be initialized even if it's
# importable.
return False
else:
if min_version:
if callback:
# extra processing step to get version to compare
check = callback(check)
return check >= min_version
else:
return True
# Global dict where we can store information on what we have and what we don't
# have available at test run time
have = {'matplotlib': test_for('matplotlib'),
'pygments': test_for('pygments'),
'sqlite3': test_for('sqlite3')}
#-----------------------------------------------------------------------------
# Test suite definitions
#-----------------------------------------------------------------------------
test_group_names = ['core',
'extensions', 'lib', 'terminal', 'testing', 'utils',
]
class TestSection(object):
def __init__(self, name, includes):
self.name = name
self.includes = includes
self.excludes = []
self.dependencies = []
self.enabled = True
def exclude(self, module):
if not module.startswith('IPython'):
module = self.includes[0] + "." + module
self.excludes.append(module.replace('.', os.sep))
def requires(self, *packages):
self.dependencies.extend(packages)
@property
def will_run(self):
return self.enabled and all(have[p] for p in self.dependencies)
# Name -> (include, exclude, dependencies_met)
test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names}
# Exclusions and dependencies
# ---------------------------
# core:
sec = test_sections['core']
if not have['sqlite3']:
sec.exclude('tests.test_history')
sec.exclude('history')
if not have['matplotlib']:
sec.exclude('pylabtools')
sec.exclude('tests.test_pylabtools')
# lib:
sec = test_sections['lib']
sec.exclude('kernel')
if not have['pygments']:
sec.exclude('tests.test_lexers')
# We do this unconditionally, so that the test suite doesn't import
# gtk, changing the default encoding and masking some unicode bugs.
sec.exclude('inputhookgtk')
# We also do this unconditionally, because wx can interfere with Unix signals.
# There are currently no tests for it anyway.
sec.exclude('inputhookwx')
# Testing inputhook will need a lot of thought, to figure out
# how to have tests that don't lock up with the gui event
# loops in the picture
sec.exclude('inputhook')
# testing:
sec = test_sections['testing']
# These have to be skipped on win32 because they use echo, rm, cd, etc.
# See ticket https://github.com/ipython/ipython/issues/87
if sys.platform == 'win32':
sec.exclude('plugin.test_exampleip')
sec.exclude('plugin.dtexample')
# don't run jupyter_console tests found via shim
test_sections['terminal'].exclude('console')
# extensions:
sec = test_sections['extensions']
# This is deprecated in favour of rpy2
sec.exclude('rmagic')
# autoreload does some strange stuff, so move it to its own test section
sec.exclude('autoreload')
sec.exclude('tests.test_autoreload')
test_sections['autoreload'] = TestSection('autoreload',
['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
test_group_names.append('autoreload')
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
def check_exclusions_exist():
from IPython.paths import get_ipython_package_dir
from warnings import warn
parent = os.path.dirname(get_ipython_package_dir())
for sec in test_sections.values():
for pattern in sec.excludes:
fullpath = pjoin(parent, pattern)
if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
warn("Excluding nonexistent file: %r" % pattern)
class ExclusionPlugin(Plugin):
"""A nose plugin to effect our exclusions of files and directories.
"""
name = 'exclusions'
score = 3000 # Should come before any other plugins
def __init__(self, exclude_patterns=None):
"""
Parameters
----------
exclude_patterns : sequence of strings, optional
Filenames containing these patterns (as raw strings, not as regular
expressions) are excluded from the tests.
"""
self.exclude_patterns = exclude_patterns or []
super(ExclusionPlugin, self).__init__()
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
def configure(self, options, config):
Plugin.configure(self, options, config)
# Override nose trying to disable plugin.
self.enabled = True
def wantFile(self, filename):
"""Return whether the given filename should be scanned for tests.
"""
if any(pat in filename for pat in self.exclude_patterns):
return False
return None
def wantDirectory(self, directory):
"""Return whether the given directory should be scanned for tests.
"""
if any(pat in directory for pat in self.exclude_patterns):
return False
return None
class StreamCapturer(Thread):
daemon = True # Don't hang if main thread crashes
started = False
def __init__(self, echo=False):
super(StreamCapturer, self).__init__()
self.echo = echo
self.streams = []
self.buffer = BytesIO()
self.readfd, self.writefd = os.pipe()
self.buffer_lock = Lock()
self.stop = Event()
def run(self):
self.started = True
while not self.stop.is_set():
chunk = os.read(self.readfd, 1024)
with self.buffer_lock:
self.buffer.write(chunk)
if self.echo:
sys.stdout.write(bytes_to_str(chunk))
os.close(self.readfd)
os.close(self.writefd)
def reset_buffer(self):
with self.buffer_lock:
self.buffer.truncate(0)
self.buffer.seek(0)
def get_buffer(self):
with self.buffer_lock:
return self.buffer.getvalue()
def ensure_started(self):
if not self.started:
self.start()
def halt(self):
"""Safely stop the thread."""
if not self.started:
return
self.stop.set()
os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
self.join()
class SubprocessStreamCapturePlugin(Plugin):
name='subprocstreams'
def __init__(self):
Plugin.__init__(self)
self.stream_capturer = StreamCapturer()
self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
# This is ugly, but distant parts of the test machinery need to be able
# to redirect streams, so we make the object globally accessible.
nose.iptest_stdstreams_fileno = self.get_write_fileno
def get_write_fileno(self):
if self.destination == 'capture':
self.stream_capturer.ensure_started()
return self.stream_capturer.writefd
elif self.destination == 'discard':
return os.open(os.devnull, os.O_WRONLY)
else:
return sys.__stdout__.fileno()
def configure(self, options, config):
Plugin.configure(self, options, config)
# Override nose trying to disable plugin.
if self.destination == 'capture':
self.enabled = True
def startTest(self, test):
# Reset log capture
self.stream_capturer.reset_buffer()
def formatFailure(self, test, err):
# Show output
ec, ev, tb = err
captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
if captured.strip():
ev = safe_str(ev)
out = [ev, '>> begin captured subprocess output <<',
captured,
'>> end captured subprocess output <<']
return ec, '\n'.join(out), tb
return err
formatError = formatFailure
def finalize(self, result):
self.stream_capturer.halt()
def run_iptest():
"""Run the IPython test suite using nose.
This function is called when this script is **not** called with the form
`iptest all`. It simply calls nose with appropriate command line flags
and accepts all of the standard nose arguments.
"""
# Apply our monkeypatch to Xunit
if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
monkeypatch_xunit()
arg1 = sys.argv[1]
if arg1 in test_sections:
section = test_sections[arg1]
sys.argv[1:2] = section.includes
elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
section = test_sections[arg1[8:]]
sys.argv[1:2] = section.includes
else:
section = TestSection(arg1, includes=[arg1])
argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
# We add --exe because of setuptools' imbecility (it
# blindly does chmod +x on ALL files). Nose does the
# right thing and it tries to avoid executables,
# setuptools unfortunately forces our hand here. This
# has been discussed on the distutils list and the
# setuptools devs refuse to fix this problem!
'--exe',
]
if '-a' not in argv and '-A' not in argv:
argv = argv + ['-a', '!crash']
if nose.__version__ >= '0.11':
# I don't fully understand why we need this one, but depending on what
# directory the test suite is run from, if we don't give it, 0 tests
# get run. Specifically, if the test suite is run from the source dir
# with an argument (like 'iptest.py IPython.core', 0 tests are run,
# even if the same call done in this directory works fine). It appears
# that if the requested package is in the current dir, nose bails early
# by default. Since it's otherwise harmless, leave it in by default
# for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
argv.append('--traverse-namespace')
plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),
SubprocessStreamCapturePlugin() ]
# we still have some vestigial doctests in core
if (section.name.startswith(('core', 'IPython.core'))):
plugins.append(IPythonDoctest())
argv.extend([
'--with-ipdoctest',
'--ipdoctest-tests',
'--ipdoctest-extension=txt',
])
# Use working directory set by parent process (see iptestcontroller)
if 'IPTEST_WORKING_DIR' in os.environ:
os.chdir(os.environ['IPTEST_WORKING_DIR'])
# We need a global ipython running in this process, but the special
# in-process group spawns its own IPython kernels, so for *that* group we
# must avoid also opening the global one (otherwise there's a conflict of
# singletons). Ultimately the solution to this problem is to refactor our
# assumptions about what needs to be a singleton and what doesn't (app
# objects should, individual shells shouldn't). But for now, this
# workaround allows the test suite for the inprocess module to complete.
if 'kernel.inprocess' not in section.name:
from IPython.testing import globalipapp
globalipapp.start_ipython()
# Now nose can run
TestProgram(argv=argv, addplugins=plugins)
if __name__ == '__main__':
run_iptest()
| bsd-3-clause |
mdegis/machine-learning | 003 - Decision Tree/exercise/main.py | 1 | 1468 | from PIL import Image
import sys
sys.path.append("../../tools")
from class_vis import prettyPicture
from prep_terrain_data import makeTerrainData
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import copy
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
clf = DecisionTreeClassifier(min_samples_split=50)
# now your job is to fit the classifier
# using the training features/labels, and to
# make a set of predictions on the test data
clf.fit(features_train, labels_train)
# store your predictions in a list named pred
pred = clf.predict(features_test)
prettyPicture(clf, features_test, labels_test, f_name="dec_tree.png")
Image.open('dec_tree.png').show()
acc = accuracy_score(pred, labels_test)
print "Decision Tree accuracy: %r" % acc
"""
clf = DecisionTreeClassifier(min_samples_split=2)
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
acc_min_samples_split_2 = accuracy_score(pred, labels_test)
clf = DecisionTreeClassifier(min_samples_split=50)
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
acc_min_samples_split_50 = accuracy_score(pred, labels_test)
def submit_accuracies():
return {"acc_min_samples_split_2":round(acc_min_samples_split_2,3),
"acc_min_samples_split_50":round(acc_min_samples_split_50,3)}
submit_accuracies()
""" | gpl-3.0 |
pygeo/pycmbs | pycmbs/benchmarking/report.py | 1 | 9277 | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import os
import sys
from matplotlib import pylab as pl
class Report(object):
"""
A class to generate latex based report
"""
def __init__(self, filename, title, author, format='png',
outdir='./', dpi=300, logofile='Phytonlogo5.pdf',
usehyperlinks=True, autocompile=True):
"""
constructor for Latex report class
filename : str
name of output file
title : str
title of the report
format : str
output format for figures (e.g. png, pdf)
outdir : str
output directory to write report and images to
dpi : int
specify dots per inch for graphic output
logofile : str
name of file for logo on first page
if None or file not existing, then nothing will
be plotted
usehyperlinks : bool
use hyperlinks for table of contents
author : str
Author of the document
autocompile : bool
ensure automatic PDF creation when
report is closed
"""
ext = ''
if filename[-4:] != '.tex':
ext = '.tex'
self.filename = outdir + filename + ext
self.format = format
self.title = title
self.author = author
self.outdir = outdir
# needs to be before open()
self.logofile = logofile
self.usehyperlinks = usehyperlinks
self.open()
self.figure_counter = 0
self.dpi = dpi
self.autocompile = autocompile
def open(self, landscape=False):
""" open report """
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
if os.path.exists(self.filename):
os.remove(self.filename)
self.landscape = landscape
self.file = open(self.filename, 'w')
self._write_header()
def close(self):
""" close report """
self._write_footer()
self.file.close()
if self.autocompile:
print 'Compiling REPORT ...'
self.compile()
def _write_header(self):
""" write document header """
if self.landscape:
landscape = 'landscape'
else:
landscape = ''
self.write('\documentclass[' + landscape + ']{article}')
self.write('\usepackage{fancyhdr}')
self.write('\usepackage{graphicx}')
self.write('\usepackage{multirow}')
self.write('\usepackage{multicol}')
#facilitates handling of floating environments
self.write('\usepackage{placeins}')
self.write('\usepackage{tabularx}')
#self.write('\usepackage{todonotes}')
self.write('\pagestyle{fancy}')
self.write('\fancyhf{}')
# writes section header
self.write('\lhead{\nouppercase{\leftmark}}')
self.write('\lfoot{\today}')
self.write('\rfoot{\thepage}')
if self.usehyperlinks:
self.write('\usepackage{hyperref}')
self.write('\hypersetup{colorlinks,citecolor=black,\
filecolor=black,linkcolor=black,urlcolor=black}')
self.write('\\begin{document}')
self.write('\\title{' + self.title.replace('_', ' ') + '}')
self.write('\\author{' + self.author + '}')
self.write('\maketitle')
if self.logofile is not None:
# logo for report
if os.path.exists(self.logofile):
self._write_single_figure(self.logofile, None)
self.write('\\newpage')
self.write('\\tableofcontents')
self.write('\\newpage')
self._write_separator()
def _write_single_figure(self, figpath, caption):
#/// LOGO ///
self._write_separator()
self.write('\\begin{figure}[!htp]')
self.write(' \centering')
self.write(' \includegraphics[width=4cm]{' + figpath + '} ')
if caption is not None:
self.write(' \caption{' + caption.replace('_', '-').replace('#', '-') + '}')
self.write('\\end{figure}')
self._write_separator()
def _write_footer(self):
""" write document footer """
self._write_separator()
self.write('\end{document}')
def _write_separator(self):
"""
write line with comments (useful to structure document sections)
"""
self.write('')
self.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
self.write('')
def figure(self, f, caption='', width='\\textwidth', height='\\textheight,keepaspectratio', bbox_inches='tight'):
"""
add a figure string to the report
Parameters
----------
f : figure
figure that will be incuded into the report
caption : str
caption for the figure to be put in the report
width : str
width as string like in latex, e.g. width='12cm'
bbox_inches : str
option for savefig
"""
if f is None:
return
self.figure_counter += 1
figname = 'fig_' + str(self.figure_counter).zfill(5) + '.' + self.format
self._include_figure(figname, caption=caption, width=width, height=height)
print('Saving figure %s' % self.outdir + figname)
f.savefig(self.outdir + figname, bbox_inches=bbox_inches, dpi=self.dpi)
def _include_figure(self, figname, caption='', width='\\textwidth', height='\\textheight,keepaspectratio'):
"""
include figure in latex file
"""
self._write_separator()
self.write('\\begin{figure}[htp]')
self.write(' \centering')
self.write(' \includegraphics[width=' + width + ', height=' + height + ']{'
+ figname + '} ')
if len(caption) > 0:
self.write(' \caption{' + caption.replace('_', '-').replace('#', '-') + '}')
self.write(' \label{fig:' + str(self.figure_counter) + '}')
self.write('\\end{figure}')
self._write_separator()
def section(self, s):
"""
write section header
s : str
title of section
"""
self.clearpage()
self.write('\section{' + s.replace('_', ' ') + '}')
def subsection(self, s):
"""
write subsection header
s : str
title of subsection
"""
#self.write('\clearpage')
self.barrier()
self.write('\subsection{' + s.replace('_', ' ') + '}')
def subsubsection(self, s):
"""
write subsection header
Parameters
----------
s : str
title of subsection
"""
self.barrier()
self.write('\subsubsection{' + s.replace('_', ' ') + '}')
def capture_figures(self):
"""
captures all figures that are plotted and
stores them in the report
"""
print 'Capturing figures and writing to report ...'
for i in pl.get_fignums():
f = pl.figure(i)
self.figure(f)
self.newpage()
def newpage(self):
""" create a new page """
self.clearpage()
self.write('\\newpage')
def clearpage(self):
"""
create a new page
as an alternative, one could also use the placeins package
http://tug.ctan.org/tex-archive/info/l2picfaq/german/l2picfaq.pdf
\usepackage{placeins}
...
\FloatBarrier
This ensures that all Figures/tables before the breakpoint
are put to paper
WITHOUT generating a new page. It is thus the opposite to
\clearpage
"""
self.write('\clearpage')
def barrier(self):
self.write('\FloatBarrier')
def write(self, s):
"""
write a string to the file
Parameters
----------
s : str
string to be written to the file
"""
self.file.write(s.replace('\f', '\\f').replace('\n', '\\n')
.replace('\t', '\\t')
.replace('\r', '\\r') + '\n')
def open_table(self):
""" opens a table """
self.write('\\begin{table}[htp]')
self.write(' \centering')
def close_table(self, caption='Put a figure caption here'):
""" closes a table """
self.write(' \caption{' + caption.replace('_', '-').replace('#', '-') + '}')
self.write('\end{table}')
def input(self, filename):
""" write an input statement """
self.write('\\input{' + filename + '}')
if not os.path.exists(filename):
print('WARNING: output file used in report not yet existing!')
def compile(self):
"""
compile latex document
"""
curdir = os.getcwd()
pdfdir = os.path.dirname(self.filename)
texfile = os.path.basename(self.filename)
os.chdir(pdfdir)
# compile report twice
os.system('pdflatex ' + texfile)
os.system('pdflatex ' + texfile)
os.chdir(curdir)
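# Illustrative sketch (added for clarity, not part of the original module): a minimal
# report written to the current directory. autocompile is switched off so that no
# pdflatex run is required; the file name, title and author are placeholders.
def _example_report():
    rep = Report('example_report', 'Example title', 'Example author',
                 outdir='./', logofile=None, autocompile=False)
    rep.section('Introduction')
    rep.write('Hello from pycmbs.')
    rep.close()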
| mit |
rgommers/statsmodels | statsmodels/sandbox/nonparametric/tests/ex_gam_am_new.py | 34 | 2606 | # -*- coding: utf-8 -*-
"""Example for gam.AdditiveModel and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, zip
import time
import numpy as np
#import matplotlib.pyplot as plt
from numpy.testing import assert_almost_equal
from scipy import stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.regression.linear_model import OLS, WLS
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction
#DGP: simple polynomial
order = 3
sigma_noise = 0.5
nobs = 1000 #1000 #with 1000, OLS and AdditiveModel agree in params at 2 decimals
lb, ub = -3.5, 4#2.5
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*2, x2))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
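# Note added for clarity (not in the original example): exog stacks the powers 0..order
# of each column of x, so its columns are [1, x1, x1**2, x1**3, 1, x2, x2**2, x2**3];
# idx drops the second constant column, leaving a single intercept in exog_reduced.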
y_true = exog.sum(1) / 2.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
example = 1
if example == 1:
m = AdditiveModel(d)
m.fit(y)
y_pred = m.results.predict(d)
for ss in m.smoothers:
print(ss.params)
res_ols = OLS(y, exog_reduced).fit()
print(res_ols.params)
#assert_almost_equal(y_pred, res_ols.fittedvalues, 3)
if example > 0:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(exog)
y_pred = m.results.mu# + m.results.alpha #m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(y, '.', alpha=0.25)
plt.plot(y_true, 'k-', label='true')
plt.plot(res_ols.fittedvalues, 'g-', label='OLS', lw=2, alpha=.7)
plt.plot(y_pred, 'r-', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel')
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], y[sortidx], '.', alpha=0.25)
plt.plot(xx[sortidx], y_true[sortidx], 'k.', label='true', lw=2)
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='AM')
plt.legend(loc='upper left')
plt.title('gam.AdditiveModel ' + ii)
counter += 1
plt.show() | bsd-3-clause |
tedunderwood/genre | munging/Evaluate.py | 1 | 17658 | # Evaluate page predictions
# EvaluatePagePredicts.py
import os, sys
import numpy as np
import pandas as pd
from scipy.stats.stats import pearsonr
import SonicScrewdriver as utils
import MetadataCascades as cascades
import Coalescer
from math import log
import statsmodels.api as sm
import pickle
import ConfusionMatrix
def pairtreelabel(htid):
''' Given a clean htid, returns a dirty one that will match
the metadata table.'''
if '+' in htid or '=' in htid:
htid = htid.replace('+',':')
htid = htid.replace('=','/')
return htid
# genretranslations = {'subsc' : 'front', 'argum': 'non', 'pref': 'non', 'aut': 'bio', 'bio': 'bio',
# 'toc': 'front', 'title': 'front', 'bookp': 'front',
# 'bibli': 'back', 'gloss': 'back', 'epi': 'fic', 'errat': 'non', 'notes': 'non', 'ora': 'non',
# 'let': 'non', 'trv': 'non', 'lyr': 'poe', 'nar': 'poe', 'vdr': 'dra', 'pdr': 'dra',
# 'clo': 'dra', 'impri': 'front', 'libra': 'back', 'index': 'back'}
genretranslations = {'subsc': 'front', 'argum': 'non', 'pref': 'non', 'aut': 'bio',
                     'bio': 'bio', 'toc': 'front', 'title': 'front', 'bookp': 'front',
                     'bibli': 'back', 'gloss': 'back', 'epi': 'fic', 'errat': 'non',
                     'notes': 'non', 'ora': 'non', 'let': 'bio', 'trv': 'non',
                     'lyr': 'poe', 'nar': 'poe', 'vdr': 'dra', 'pdr': 'dra',
                     'clo': 'dra', 'impri': 'front', 'libra': 'back', 'index': 'back'}
user = input("Which directory of predictions? ")
predictdir = "/Volumes/TARDIS/output/" + user
groundtruthdir = "/Users/tunder/Dropbox/pagedata/newfeatures/genremaps/"
# else:
# groundtruthdir = "/Users/tunder/Dropbox/pagedata/mixedtraining/genremaps/"
predictfiles = os.listdir(predictdir)
thefictiondir = input("Fiction dir? ")
if thefictiondir != "n":
thefictiondir = "/Volumes/TARDIS/output/" + thefictiondir
thepoedir = input("Poetry dir? ")
if thepoedir != "n":
thepoedir = "/Volumes/TARDIS/output/" + thepoedir
user = input("Count words (y/n)? ")
if user == "y":
countwords = True
else:
countwords = False
# user = input("Separate index (y/n)? ")
# if user == "y":
# genretranslations["index"] = "index"
# genretranslations["gloss"] = "index"
# genretranslations["bibli"] = "index"
user = input("Bibli == ads? ")
if user == "y":
genretranslations['bibli'] = 'ads'
user = input("Old ground truth? ")
if user == "n":
groundtruthdir = "/Users/tunder/Dropbox/pagedata/thirdfeatures/genremaps/"
elif user == "fourth":
groundtruthdir = "/Users/tunder/Dropbox/pagedata/fourthfeatures/genremaps/"
elif user == "fifth":
groundtruthdir = "/Users/tunder/Dropbox/pagedata/fifthfeatures/genremaps/"
elif user == "sixth":
groundtruthdir = "/Users/tunder/Dropbox/pagedata/sixthfeatures/genremaps/"
elif user == "seventh":
groundtruthdir = "/Users/tunder/Dropbox/pagedata/seventhfeatures/genremaps/"
elif user == "8":
groundtruthdir = "/Users/tunder/Dropbox/pagedata/eighthfeatures/genremaps/"
elif user == "newtest":
groundtruthdir = "/Users/tunder/Dropbox/pagedata/nt/genremaps/"
elif user == "20c":
groundtruthdir = "/Users/tunder/Dropbox/pagedata/to1923features/genremaps/"
groundtruthfiles = os.listdir(groundtruthdir)
tocoalesce = input("Coalesce? ")
if tocoalesce == "y":
tocoalesce = True
else:
tocoalesce = False
if countwords:
filewordcounts = dict()
if groundtruthdir == "/Users/tunder/Dropbox/pagedata/thirdfeatures/genremaps/":
wordcountpath = "/Users/tunder/Dropbox/pagedata/thirdfeatures/pagelevelwordcounts.tsv"
elif groundtruthdir == "/Users/tunder/Dropbox/pagedata/fourthfeatures/genremaps/":
wordcountpath = "/Users/tunder/Dropbox/pagedata/fourthfeatures/pagelevelwordcounts.tsv"
elif groundtruthdir == "/Users/tunder/Dropbox/pagedata/fifthfeatures/genremaps/":
wordcountpath = "/Users/tunder/Dropbox/pagedata/fifthfeatures/pagelevelwordcounts.tsv"
elif groundtruthdir == "/Users/tunder/Dropbox/pagedata/sixthfeatures/genremaps/":
wordcountpath= "/Users/tunder/Dropbox/pagedata/sixthfeatures/pagelevelwordcounts.tsv"
elif groundtruthdir == "/Users/tunder/Dropbox/pagedata/seventhfeatures/genremaps/":
wordcountpath= "/Users/tunder/Dropbox/pagedata/seventhfeatures/pagelevelwordcounts.tsv"
elif groundtruthdir == "/Users/tunder/Dropbox/pagedata/nt/genremaps/":
wordcountpath = "/Users/tunder/Dropbox/pagedata/nt/pagelevelwordcounts.tsv"
elif groundtruthdir == "/Users/tunder/Dropbox/pagedata/eighthfeatures/genremaps/":
wordcountpath = "/Users/tunder/Dropbox/pagedata/eighthfeatures/pagelevelwordcounts.tsv"
elif groundtruthdir == "/Users/tunder/Dropbox/pagedata/to1923features/genremaps/":
wordcountpath = "/Users/tunder/Dropbox/pagedata/to1923features/pagelevelwordcounts.tsv"
else:
wordcountpath = "/Users/tunder/Dropbox/pagedata/pagelevelwordcounts.tsv"
with open(wordcountpath, mode="r", encoding="utf-8") as f:
filelines = f.readlines()
for line in filelines[1:]:
line = line.rstrip()
fields = line.split('\t')
htid = fields[0]
pagenum = int(fields[1])
count = int(fields[2])
if htid in filewordcounts:
filewordcounts[htid].append((pagenum, count))
else:
filewordcounts[htid] = [(pagenum, count)]
for key, value in filewordcounts.items():
value.sort()
# This just makes sure tuples are sorted in pagenum order.
else:
filewordcounts = dict()
# The base list here is produced by predictfiles
# because obvs we don't care about ground truth
# that we can't map to a prediction.
# Our goal in the next loop is to produce such a mapping.
matchedfilenames = dict()
for filename in predictfiles:
if ".predict" not in filename:
continue
htid = filename[0:-8]
groundtruthversion = htid + ".map"
if groundtruthversion not in groundtruthfiles:
print("Missing " + htid)
else:
matchedfilenames[filename] = groundtruthversion
# We have identified filenames. Now define functions.
def genresareequal(truegenre, predictedgenre):
arethesame = ["bio", "trv", "aut", "non"]
alsothesame = ["back", "index", "front", "ads"]
if truegenre == predictedgenre:
return True
elif truegenre in arethesame and predictedgenre in arethesame:
return True
elif truegenre in alsothesame and predictedgenre in alsothesame:
return True
else:
return False
def compare_two_lists(truelist, predicted, wordsperpage, whethertocountwords):
global genretranslations
if len(truelist) != len(predicted):
print(len(truelist))
print(truelist[len(truelist) -2])
print(truelist[len(truelist) -1])
print(len(predicted))
sys.exit()
errorsbygenre = dict()
correctbygenre = dict()
accurate = 0
inaccurate = 0
totaltruegenre = dict()
for index, truegenre in enumerate(truelist):
if truegenre in genretranslations:
truegenre = genretranslations[truegenre]
if whethertocountwords:
increment = wordsperpage[index]
else:
increment = 1
utils.addtodict(truegenre, increment, totaltruegenre)
predictedgenre = predicted[index]
if genresareequal(truegenre, predictedgenre):
utils.addtodict(truegenre, increment, correctbygenre)
accurate += increment
else:
utils.addtodict((truegenre, predictedgenre), increment, errorsbygenre)
inaccurate += increment
return totaltruegenre, correctbygenre, errorsbygenre, accurate, inaccurate
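# Illustrative sketch (added for clarity, not part of the original script): how
# compare_two_lists might be called on two short page-level genre sequences, counting
# pages rather than words (so the word-count list can be left empty). The genre codes
# below are assumptions used only for illustration.
def _example_compare():
    true_pages = ['front', 'fic', 'fic', 'poe']
    predicted_pages = ['front', 'fic', 'non', 'poe']
    return compare_two_lists(true_pages, predicted_pages, [], False)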
def add_dictionary(masterdict, dicttoadd):
for key, value in dicttoadd.items():
if key in masterdict:
masterdict[key] += value
else:
masterdict[key] = value
return masterdict
def evaluate_filelist(matchedfilenames, excludedhtidlist):
global predictdir, groundtruthdir, filewordcounts
smoothederrors = dict()
unsmoothederrors = dict()
smoothedcorrect = dict()
unsmoothedcorrect = dict()
coalescederrors = dict()
coalescedcorrect = dict()
totalgt = dict()
roughaccurate = 0
roughnotaccurate = 0
smoothaccurate = 0
smoothnotaccurate = 0
coalescedaccurate = 0
coalescednotaccurate = 0
# The correct dictionaries pair a genre code (in the original) to a number of times it was correctly
# identified
# The error dictionaries map a tuple of (correct code, error code) to a number of times it occurred.
truesequences = dict()
predictedsequences = dict()
accuracies = dict()
metadatatable = dict()
symptoms = ["weakconfirmation", "weakdenial", "strongconfirmation", "strongdenial", "modelagrees", "modeldisagrees"]
for symptom in symptoms:
metadatatable[symptom] = dict()
metadatatable["numberofchunks"] = dict()
# metadatatable["fictonon"] = dict()
# metadatatable["bio"] = dict()
for pfile, gtfile in matchedfilenames.items():
htid = gtfile[0:-4]
if htid in excludedhtidlist:
continue
# The prediction file has three columns, of which the second
# is an unsmoothed prediction and the third is smoothed
smoothlist = list()
roughlist = list()
detailedprobabilities = list()
pfilepath = os.path.join(predictdir, pfile)
with open(pfilepath,encoding = "utf-8") as f:
filelines = f.readlines()
for line in filelines:
line = line.rstrip()
fields = line.split('\t')
roughlist.append(fields[1])
smoothlist.append(fields[2])
if len(fields) > 5:
detailedprobabilities.append("\t".join(fields[5:]))
# The prediction file has this format:
# pagenumber roughgenre smoothgenre many ... detailed predictions
# fields 3 and 4 will be predictions for dummy genres "begin" and "end"
correctlist = list()
gtfilepath = os.path.join(groundtruthdir, gtfile)
with open(gtfilepath,encoding = "utf-8") as f:
filelines = f.readlines()
for line in filelines:
line = line.rstrip()
fields = line.split('\t')
correctlist.append(fields[1])
assert len(correctlist) == len(roughlist)
if countwords:
tuplelist = filewordcounts[htid]
wordsperpage = [x[1] for x in tuplelist]
else:
wordsperpage = list()
# Experiment.
oldgenre = ""
transitioncount = 0
biocount = 0
for agenre in roughlist:
if agenre == "bio":
biocount += 1
if oldgenre == "fic" and (agenre == "non" or agenre =="bio"):
transitioncount += 1
oldgenre = agenre
fictionfilepath = os.path.join(thefictiondir, pfile)
poetryfilepath = os.path.join(thepoedir, pfile)
mainmodel = cascades.read_probabilities(detailedprobabilities)
mostlydrapoe, probablybiography, probablyfiction, notdrama, notfiction = cascades.choose_cascade(htid, smoothlist)
# This function returns a set of boolean flags which will help us choose a specialized model
# to correct current predictions. This scheme is called "cascading classification," thus
# we are "choosing a cascade."
#defensive copy
adjustedlist = [x for x in smoothlist]
if notdrama:
adjustedlist = cascades.otherthandrama(adjustedlist, mainmodel)
if notfiction:
adjustedlist = cascades.otherthanfiction(adjustedlist, mainmodel)
if thepoedir != "n" and thefictiondir != "n":
numberoftrues = sum([mostlydrapoe, probablybiography, probablyfiction])
if numberoftrues == 1:
if mostlydrapoe and thepoedir != "n":
adjustedlist, mainmodel = cascades.drapoe_cascade(adjustedlist, mainmodel, poetryfilepath)
elif probablybiography:
adjustedlist = cascades.biography_cascade(adjustedlist)
elif probablyfiction and thefictiondir != "n":
adjustedlist, mainmodel = cascades.fiction_cascade(adjustedlist, mainmodel, fictionfilepath)
if len(smoothlist) != len(adjustedlist):
print("Already vitiated.")
print(len(smoothlist), smoothlist[len(smoothlist) -1])
print(len(adjustedlist), adjustedlist[len(adjustedlist) -1])
print(notfiction,notdrama,mostlydrapoe,probablybiography,probablyfiction)
if tocoalesce:
coalescedlist, numberofdistinctsequences = Coalescer.coalesce(adjustedlist)
# This function simplifies our prediction by looking for cases where a small
# number of pages in genre X are surrounded by larger numbers of pages in
# genre Y. This is often an error, and in cases where it's not technically
# an error it's a scale of variation we usually want to ignore. However,
# we will also record detailed probabilities for users who *don't* want to
# ignore these
else:
coalescedlist = adjustedlist
dummy, numberofdistinctsequences = Coalescer.coalesce(adjustedlist)
if len(smoothlist) != len(coalescedlist):
print('vitiated now')
metadataconfirmation = cascades.metadata_check(htid, coalescedlist)
# Now that we have adjusted the predictions, check them against volume-level metadata.
for key, value in metadataconfirmation.items():
metadatatable[key][htid] = value
metadatatable["numberofchunks"][htid] = log(numberofdistinctsequences + 1)
# metadatatable["fictonon"][htid] = transitioncount
# metadatatable["bio"][htid] = biocount / len(roughlist)
# This is significant. We don't want to overpenalize long books, but there is
# a correlation between the number of predicted genre shifts and inaccuracy.
# So we take the log.
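# (Added illustration with hypothetical counts, assuming log here is the natural log:
# a volume with 1 distinct sequence contributes log(2) ~ 0.69 while one with 20
# sequences contributes log(21) ~ 3.04, so the penalty grows slowly with length.)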
totaltruegenre, correctbygenre, errorsbygenre, accurate, inaccurate = compare_two_lists(correctlist, smoothlist, wordsperpage, countwords)
add_dictionary(smoothederrors, errorsbygenre)
add_dictionary(smoothedcorrect, correctbygenre)
add_dictionary(totalgt, totaltruegenre)
# Only do this for one comparison
smoothaccurate += accurate
smoothnotaccurate += inaccurate
if ("index", "non") in errorsbygenre:
if errorsbygenre[("index", "non")] > 2:
print("Index fail: " + htid + " " + str(errorsbygenre[("index", "non")]))
totaltruegenre, correctbygenre, errorsbygenre, accurate, inaccurate = compare_two_lists(correctlist, roughlist, wordsperpage, countwords)
add_dictionary(unsmoothederrors, errorsbygenre)
add_dictionary(unsmoothedcorrect, correctbygenre)
roughaccurate += accurate
roughnotaccurate += inaccurate
totaltruegenre, correctbygenre, errorsbygenre, accurate, inaccurate = compare_two_lists(correctlist, coalescedlist, wordsperpage, countwords)
add_dictionary(coalescederrors, errorsbygenre)
add_dictionary(coalescedcorrect, correctbygenre)
coalescedaccurate += accurate
coalescednotaccurate += inaccurate
truesequences[gtfile] = correctlist
predictedsequences[gtfile] = coalescedlist
thisaccuracy = accurate / (accurate + inaccurate)
accuracies[htid] = thisaccuracy
# Now we need to interpret the dictionaries.
for genre, count in totalgt.items():
print()
print(genre.upper() + " : " + str(count))
if count < 1:
continue
print()
print("SMOOTHED PREDICTION, " + str(count) + " | " + genre)
print("Correctly identified: " + str(smoothedcorrect.get(genre, 0) / count))
print("Errors: ")
for key, errorcount in smoothederrors.items():
gt, predict = key
if gt == genre:
print(predict + ": " + str(errorcount) + " " + str(errorcount/count))
print()
print("COALESCED PREDICTION, " + str(count) + " | " + genre)
print("Correctly identified: " + str(coalescedcorrect.get(genre, 0) / count))
print("Errors: ")
for key, errorcount in coalescederrors.items():
gt, smoothed = key
if gt == genre:
print(smoothed + ": " + str(errorcount) + " " + str(errorcount/count))
roughaccuracy = roughaccurate / (roughaccurate + roughnotaccurate)
smoothaccuracy = smoothaccurate / (smoothaccurate + smoothnotaccurate)
coalaccuracy = coalescedaccurate / (coalescedaccurate + coalescednotaccurate)
confusion = ConfusionMatrix.confusion_matrix(coalescedcorrect, coalescederrors)
return metadatatable, accuracies, roughaccuracy, smoothaccuracy, coalaccuracy
metadatatable, accuracies, roughaccuracy, smoothaccuracy, coalaccuracy = evaluate_filelist(matchedfilenames, list())
print()
print("ROUGH MICROACCURACY:")
print(roughaccuracy)
print("SMOOTHED MICROACCURACY:")
print(smoothaccuracy)
print("COALESCED MICROACCURACY:")
print(coalaccuracy)
with open("/Users/tunder/Dropbox/pagedata/interrater/ActualAccuracies.tsv", mode = "w", encoding="utf-8") as f:
f.write("htid\taccuracy\n")
for key, value in accuracies.items():
outline = key + "\t" + str(value) + "\n"
f.write(outline)
metadatapath = os.path.join(predictdir, "predictionMetadata.tsv")
rowindices, columns, metadata = utils.readtsv(metadatapath)
metadatatable['maxprob']= metadata['maxprob']
metadatatable['gap'] = metadata['gap']
metadatatable['accuracy'] = accuracies
data = pd.DataFrame(metadatatable, dtype = "float")
data['intercept'] = 1.0
train_cols = data.columns[1:]
logit = sm.Logit(data['accuracy'], data[train_cols])
result = logit.fit()
print(result.summary())
predictions = result.predict(data[train_cols])
print(pearsonr(data['accuracy'], predictions))
# print("Checking logitpredict.")
# import LogisticPredict
# homegrown = LogisticPredict.logitpredict(result.params, data[train_cols])
# print(pearsonr(predictions, homegrown))
# user = input("Dump model to pickle file? (y/n) ")
# if user == "y":
# with open("/Volumes/TARDIS/output/models/PredictAccuracy.p", mode = "w+b") as picklefile:
# pickle.dump(result, picklefile, protocol = 3)
user = input("Dump model parameters to file? (y/n) ")
if user == "y":
result.params.to_csv("/Volumes/TARDIS/output/models/ConfidenceModelParameters.csv")
print("Saved to /Volumes/TARDIS/output/models/ConfidenceModelParameters.csv")
idstoexclude = [x for x in data.index[predictions < .9]]
metadatatable, newaccuracies, roughaccuracy, smoothaccuracy, coalaccuracy = evaluate_filelist(matchedfilenames, idstoexclude)
print()
print("ROUGH MICROACCURACY:")
print(roughaccuracy)
print("SMOOTHED MICROACCURACY:")
print(smoothaccuracy)
print("COALESCED MICROACCURACY:")
print(coalaccuracy)
user = input("Continue? ")
for filename, accuracy in accuracies.items():
print(accuracy, filename)
truesequence = truesequences[filename]
predictedsequence = predictedsequences[filename]
for index, truegenre in enumerate(truesequence):
print(truegenre + ' \t ' + predictedsequence[index])
user = input("Continue? ")
| mit |
fraserphysics/F_UNCLE | F_UNCLE/Utils/test/test_Experiment.py | 1 | 6345 | #!/usr/bin/python
"""
test_Experiment
Tests of the Experiment abstract class
Authors
-------
- Stephen Andrews (SA)
- Andrew M. Fraiser (AMF)
Revisions
---------
0 -> Initial class creation (03-16-2016)
ToDo
----
None
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# =========================
# Python Standard Libraries
# =========================
import unittest
import sys
import os
import copy
import warnings
import time
# =========================
# Python Packages
# =========================
import numpy as np
from numpy.linalg import inv
import numpy.testing as npt
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline as IUSpline
# =========================
# Custom Packages
# =========================
from ..Struc import Struc
from ..PhysicsModel import PhysicsModel
from .test_PhysicsModel import SimpleModel
from .test_Simulation import SimpleSimulation
from ..Simulation import Simulation
from ..Experiment import Experiment, GaussianExperiment
class SimpleExperiment(GaussianExperiment):
"""This creates a set of synthetic experimental data.
It follows the same functional form as the SimpleModel and
SimpleSimulation but introduces a time shift
"""
def _get_data(self, *args, **kwargs):
"""Generates some synthetic data
Independent variable spans from 1 to 11
Dependent variable is :math:`(4 xx)^2 + 2 xx`, where the returned independent variable is ``xx + 1``
This allows us to test the trigger as well
"""
xx = np.linspace(0, 10, 5)
yy = (4 * (xx))**2 + 2 * (xx)
return xx + 1, yy, None
def simple_trigger(self, x, y):
"""Rather than test the trigger funciton, this returns the known offset
"""
return x[0]
def get_splines(self, x_data, y_data, var_data=0):
"""Overloads the base spline generateion which cannot deal with the
smooth experiment
"""
return IUSpline(x_data, y_data), None
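# Added note (not part of the original tests): the synthetic data above uses the same
# functional form as SimpleSimulation, (a*xx)**2 + b*xx, but returns the independent
# variable shifted by +1; that shift is why test_align below expects tau == 1, and the
# different coefficients (4, 2 here versus the SimpleModel([2, 1]) used in the tests)
# are what the expected epsilon in test_compare encodes.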
class TestSimpleExperiment(unittest.TestCase):
"""Exercises the functionality of Expeiment using simple classes
"""
def setUp(self):
"""Creates a SimpleSimulation
"""
self.models = {'simp': SimpleModel([2, 1])}
self.simSimp = SimpleSimulation()
def test_init(self):
"""Tests that the base experiment can be instantiated
"""
exp = SimpleExperiment()
self.assertIsInstance(exp, GaussianExperiment)
def test_shape(self):
"""Tests that the correct shape of the data is returned
"""
exp = SimpleExperiment()
self.assertEqual(5, exp.shape())
def test_sigma(self):
"""Tests that the correct covariance matrix is generated
"""
exp = SimpleExperiment(exp_var=0.01)
true_sigma = np.diag((exp.data[1] * 0.01)**2)
npt.assert_array_almost_equal(true_sigma, exp.get_sigma())
def test_align(self):
"""Test of the align function
"""
exp = SimpleExperiment()
# Generate some simulation data
sim_data = self.simSimp(self.models)
stored_data = copy.deepcopy(sim_data)
# Align the simulation data with the experiment
aligned_data = exp.align(sim_data)
# self.plot('aligned', exp, aligned_data)
self.assertEqual(aligned_data[2]['tau'], 1)
npt.assert_array_equal(aligned_data[0], exp.data[0])
def test_compare(self):
"""Tests that the experiment can be compared to aligned simulation data
"""
exp = SimpleExperiment()
# Generate some simulation data
sim_data = self.simSimp(self.models)
stored_data = copy.deepcopy(sim_data)
sim_data = exp.align(sim_data)
epsilon = exp.compare(sim_data)
npt.assert_array_almost_equal((4**2 - 2**2) * (exp.data[0]-1)**2
+ (2-1) * (exp.data[0]-1), epsilon)
def test_pq(self):
"""Tests that the experiment can generate the p and q matrix
"""
exp = SimpleExperiment()
# Generate some simulation data
sim_data = self.simSimp(self.models)
# Align the data so that it is evaluated at the same time steps as
# the experiment
aligned_data = exp.align(sim_data)
# Get the sensitivity matrix
sens_matrix = self.simSimp.get_sens(self.models, ['simp'], aligned_data)
# Get the error between the sim and experiment
epsilon = exp.compare(sim_data)
p_mat, q_vec = exp.get_pq(self.models, ['simp'], sim_data, sens_matrix,
scale=False)
sigma_mat = exp.get_sigma()
p_mat_hat = np.dot(np.dot(sens_matrix.T, np.linalg.inv(sigma_mat)),
sens_matrix)
npt.assert_array_equal(p_mat, p_mat_hat,
err_msg="P matrix calculated incorrectly")
q_vec_hat = -np.dot(np.dot(epsilon, np.linalg.inv(sigma_mat)),
sens_matrix)
npt.assert_array_equal(q_vec, q_vec_hat,
err_msg="q vector calculated incorrectly")
def test_log_like(self):
"""Tests the log likelyhood of the experiment given simulation data
"""
exp = SimpleExperiment()
# Generate some simulation data
sim_data = self.simSimp(self.models)
# Align the data so that it is evaluated at the same time steps as
# the experiment
aligned_data = exp.align(sim_data)
epsilon = exp.compare(sim_data)
sigma_mat = exp.get_sigma()
log_like_hat = -0.5 * np.dot(epsilon, np.dot(np.linalg.inv(sigma_mat),
epsilon))
log_like = exp.get_log_like(sim_data)
self.assertEqual(log_like_hat, log_like)
def plot(self, name, exp, simdata):
"""Plots a comparisson
"""
fig = plt.figure()
ax1 = fig.gca()
ax1.plot(exp.data[0], exp.data[1], 'o', label='Experiment')
ax1.plot(simdata[0], simdata[1][0], '-', label='Simulation')
fig.savefig(name + '.pdf')
| gpl-2.0 |
MohammedWasim/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
nelson-liu/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 99 | 4163 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
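# Added note: with only the uncommented entries above, the grid has
# 3 (max_df) * 2 (ngram_range) * 2 (alpha) * 2 (penalty) = 24 parameter
# combinations, and each combination is refit once per cross-validation fold.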
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
wgm2111/wgm-kaggle | poker/random_forest.py | 1 | 2618 | #!/usr/bin/env python
# Author: William G.K. Martin (wgm2111@cu where cu=columbia.edu)
# copyright (c) 2011
# liscence: BSD style
"""
This rewrite uses pandas to read data more elegantly.
"""
import numpy as np
from pandas import read_csv
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.cross_validation import cross_val_score
from basic_imports import test_card_array_true
# Read in the training set
# --
ftrain = 'train.csv'
train_data_frame = read_csv(ftrain) # data frame
ftest = 'test.csv'
test_data_frame = read_csv(ftest)
fsample_submission = "sampleSubmission.csv"
sample_submission = read_csv(fsample_submission)
fsubmission = "my_submission.csv"
# Make arrays out of the data
Xtrain = train_data_frame.as_matrix(
['S1', 'C1', 'S2', 'C2','S3', 'C3','S4', 'C4','S5', 'C5'])
y = train_data_frame.as_matrix(['hand']).reshape(-1)
# read in the test set
# --
Xtest = test_data_frame.as_matrix(
['S1', 'C1', 'S2', 'C2','S3', 'C3','S4', 'C4','S5', 'C5'])
# Read the sample submission
# permutations
# --
perm_array = np.eye(10, k=-2, dtype='int') + np.eye(10, k=8, dtype='int')
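# Added note: perm_array is a 10x10 permutation matrix (ones on the k=-2 and k=8
# diagonals), so np.dot(hand, perm_array) cyclically rotates the five (suit, rank)
# column pairs; the rotated rows built below are the same hands with the cards
# relabeled, which is why they can be appended with the original labels.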
old = Xtrain
Xperms = [Xtrain,]
yperms = [y,]
for i in range(4):
new = np.dot(old, perm_array)
Xperms.append(new)
yperms.append(y)
old = new
Xperms = np.concatenate(Xperms)
yperms = np.concatenate(yperms)
# Transform the problem
def imbed52(X):
"Imbed hands into a 52 dimensional space"
N = X.shape[0]
Icard = 13*(X[:, ::2]-1) + X[:, 1::2]-1
Icard = Icard + 52 * np.arange(N).reshape(N, 1)
Xcard = np.zeros((N, 52), dtype='int')
Xcard.flat[Icard] +=1
return Xcard
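# Added illustration of the indexing above: a card with suit S in 1..4 and rank C in
# 1..13 maps to slot 13*(S-1) + (C-1), so (S=1, C=1) -> 0 and (S=4, C=13) -> 51;
# each output row is a length-52 0/1 vector with one entry set per card in the hand.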
Xcard = imbed52(Xperms)
Xcard_test = imbed52(Xtest)
# Random Forest classifier
rfc = RandomForestClassifier()
# print("Random Forest CV score: {0}".format(cross_val_score(rfc, Xperms, yperms)))
rfc52 = RandomForestClassifier()
N = Xtrain.shape[0]
rfc52.fit(Xcard[:N], yperms[:N])
print("Random Forest CV score: {0}".format(cross_val_score(rfc52, Xcard, yperms)))
ytest = rfc52.predict(Xcard_test)
# Nperms = 5 * Xtrain.shape[0]
# Xperms = sp.zeros((Nperms, 10))
# yperms = sp.zeros((Nperms,))
# old=Xtrain
# for i in range(4):
# new = sp.dot(old, perm_array))
# Xperms[i * Xtrain.shape[0], :] = new
# yperms[i * Xtrain.shape[0], :] = y
# old = new
# for i in range(4):
# Xperms.append(sp.dot())
# Xperms.append(sp.dot())
# Xbig = sp.array([
# sp.dot(X)])
# for i in range(5):
# perm_array = np.concatenate([
# ])
# make an alternative form of the data
| bsd-3-clause |
mluessi/dronin | python/ins/pyins.py | 11 | 12810 | from sympy import symbols, lambdify, sqrt
from sympy import MatrixSymbol, Matrix
from numpy import cos, sin, power
from sympy.matrices import *
from quaternions import *
import numpy
# this is the set of (currently) recommended INS settings. modified from
# https://raw.githubusercontent.com/wiki/TauLabs/TauLabs/files/htfpv-sparky-nav_20130527.uav
default_mag_var = numpy.array([10.0, 10.0, 100.0])
default_gyro_var = numpy.array([1e-5, 1e-5, 1e-4])
default_accel_var = numpy.array([0.01, 0.01, 0.01])
default_baro_var = 0.1
default_gps_var=numpy.array([1e-3,1e-2,10])
class PyINS:
GRAV = 9.805
MAG_HEADING = True # use the compass for heading only
def __init__(self):
""" Creates the PyINS class and prepares the equations.
Important variables are
* X - the vector of state variables
* Xd - the vector of state derivatives for state and inputs
* Y - the vector of outputs for current state value
"""
# Create state variable
Px, Py, Pz = symbols('Px Py Pz')
Vx, Vy, Vz = symbols('Vx Vy Vz')
q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
wbx, wby, wbz = symbols('wbx wby wbz')
abz = symbols('abz')
# Helper variables
P = Matrix([Px, Py, Pz])
V = Matrix([Vx, Vy, Vz])
q = Matrix([q0, q1, q2, q3])
wb = Matrix([wbx, wby, wbz])
X = Matrix([Px, Py, Pz, Vx, Vy, Vz, q0, q1, q2, q3, wbx, wby, wbz, abz])
# strap down equation matrix
Q = Matrix([[-q1, -q2, -q3],[q0, -q3, q2],[q3, q0, -q1],[-q2, q1, q0]])
# Inputs to system
wx, wy, wz = symbols('wx, wy, wz')
ax, ay, az = symbols('ax, ay, az')
U = [wx, wy, wz, ax, ay, az]
# Noise inputs
nwx, nwy, nwz = symbols('nwx, nwy, nwz') # gyro noise
nax, nay, naz = symbols('nax, nay, naz') # accel noise
nwbx, nwby, nwbz = symbols('nwbx, nwby, nwbz') # gyro bias drift
nabz = symbols('nabz') # z-accel bias drift
n = [nwx, nwy, nwz, nax, nay, naz, nwbx, nwby, nwbz, nabz]
# State equations
Rbe = Matrix([[(q0*q0+q1*q1-q2*q2-q3*q3), 2*(q1*q2+q0*q3), 2*(q1*q3-q0*q2)],
[2*(q1*q2-q0*q3), (q0*q0-q1*q1+q2*q2-q3*q3), 2*(q2*q3+q0*q1)],
[2*(q1*q3+q0*q2), 2*(q2*q3-q0*q1), (q0*q0-q1*q1-q2*q2+q3*q3)]])
Reb = Rbe.T
atrue = Matrix([ax-nax,ay-nay,az-naz-abz])
Pd = V
Vd = (Reb * atrue) + Matrix([0,0,self.GRAV])
qd = Q/2 * Matrix([wx-nwx-wbx,wy-nwy-wby,wz-nwz-wbz]) # measured - noise - bias
wbd = Matrix([nwbx, nwby, nwbz])
abd = nabz
# combine all the derivatives.
Xd = Matrix([Pd[0], Pd[1], Pd[2], Vd[0], Vd[1], Vd[2], qd[0], qd[1], qd[2], qd[3], wbd[0], wbd[1], wbd[2], abd])
Xd_nn = Xd.subs(nwbx,0).subs(nwby,0).subs(nwbz,0).subs(nwx,0).subs(nwy,0).subs(nwz,0).subs(nabz,0) # version without noise inputs
# Compute jacobians of state equations.
wrt = [Px, Py, Pz, Vx, Vy, Vz, q0, q1, q2, q3, wbx, wby, wbz, abz, wx, wy, wz]
Xdl = lambdify(wrt,Xd_nn)
F = Xd_nn.jacobian(X)
G = Xd.jacobian(n)
# Output equations.
#Be = MatrixSymbol('Be',3,1) # mag near home location
Be = Matrix([400, 0, 1600])
if self.MAG_HEADING:
# Project the mag vector to the body frame by only the rotational component
k1 = sqrt(power(q0**2 + q1**2 - q2**2 - q3**2,2) + power(q0*q3*2 + q1*q2*2,2))
Rbh = Matrix([[(q0**2 + q1**2 - q2**2 - q3**2)/k1, (2*q0*q3 + 2*q1*q2)/k1 , 0],
[-(2*q0*q3 + 2*q1*q2)/k1 , (q0**2 + q1**2 - q2**2 - q3**2)/k1 , 0],
[0,0,1]]);
Bb = Rbh * Matrix(Be); # predicted mag in body frame
else:
Bb = Rbe * Matrix(Be); # predicted mag in body frame
Y = Matrix([P, V, Bb, Matrix([-Pz])]) # predicted outputs
H = Y.jacobian(X)
# Store the useful functions.
self.X = X # vector of state variables
self.Xd = Xd # vector of derivatives in state space as a function of state and inputs
self.Y = Y # vector of functions that predict the output for the current state space output
self.U = U # vector of inputs
self.noise = n
# Functions to linearize derivatives around current point.
self.F = F
self.G = G
self.H = H
# These are populated in the prepare method
self.r_X = numpy.matrix([0,0,0,0,0,0,1.0,0,0,0,0,0,0,0]).T
self.r_P = []
self.Q = []
self.R = []
self.l_Xd = []
self.l_F = []
self.l_G = []
self.l_H = []
self.l_Y = []
# state format used by common code
self.state = numpy.zeros((16))
self.state[0:14] = self.r_X[0:14].T
self.state[-1] = self.r_X[-1]
def prepare(self):
""" Prepare to run data through the PyINS
Initializes the state variables and covariance. Compiles equations to run faster.
* self.R is the measurement noise (pos, vel, mag, baro)
* self.Q is the process noise (gyro, accel, gyro bias, accel bias)
"""
Q = [default_gyro_var[0], default_gyro_var[1], default_gyro_var[2],
default_accel_var[0], default_accel_var[1], default_accel_var[2],
5e-7, 5e-7, 2e-6, 5e-4]
R = [default_gps_var[0], default_gps_var[0], default_gps_var[2],
default_gps_var[1], default_gps_var[1], default_gps_var[2],
default_mag_var[0], default_mag_var[1], default_mag_var[2], default_baro_var]
self.r_X = numpy.matrix([0,0,0,0,0,0,1.0,0,0,0,0,0,0,0]).T
self.r_P = numpy.diagflat([25,25,25,
5,5,5,
1e-5,1e-5,1e-5,1e-5,
1e-9,1e-9,1e-9,
1e-7])
self.R = numpy.diagflat(R)
self.Q = numpy.diagflat(Q)
# the noise inputs to the system are not used in the prediction (they are assumed to have zero mean)
Xd = self.Xd
for i in self.noise:
Xd = Xd.subs(i,0)
if True:
from sympy.utilities.autowrap import ufuncify
self.l_Xd = lambdify([self.X, self.U],Xd, "numpy")
# Jacobian of state derivatives as a function of state and input
self.l_F = lambdify([self.X, self.U],Xd.jacobian(self.X), "numpy")
# Jacobian of state derivatives with process noise as a function of state
# note this uses the original Xd with noise terms still included
self.l_G = lambdify([self.X], self.Xd.jacobian(self.noise), "numpy")
# Jacobian of outputs versus state
self.l_H = lambdify([self.X], self.Y.jacobian(self.X), "numpy")
# Functional form to predict outputs
self.l_Y = lambdify([self.X], self.Y, "numpy")
else:
self.l_Xd = lambdify([self.X, self.U],Xd)
# Jacobian of state derivatives as a function of state and input
self.l_F = lambdify([self.X, self.U],Xd.jacobian(self.X))
# Jacobian of state derivatives with process noise as a function of state
# note this uses the original Xd with noise terms still included
self.l_G = lambdify([self.X], self.Xd.jacobian(self.noise))
# Jacobian of outputs versus state
self.l_H = lambdify([self.X], self.Y.jacobian(self.X))
# Functional form to predict outputs
self.l_Y = lambdify([self.X], self.Y)
def normalize(self):
""" Make sure the quaternion state stays normalized
"""
q = self.r_X[6:10,0]
qnew = numpy.zeros((4,1))
qn = sqrt(q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2)
for i in range(4):
qnew[i] = q[i] / qn
self.r_X[6:10] = qnew
def suppress_bias(self):
self.r_X[10:] = 0
self.r_P[10:,:] = zeros(4,14)
self.r_P[:,10:] = zeros(14,4)
def configure(self, mag_var=None, gyro_var=None, accel_var=None, baro_var=None, gps_var=None):
""" configure the INS parameters """
Q = [default_gyro_var[0], default_gyro_var[1], default_gyro_var[2],
default_accel_var[0], default_accel_var[1], default_accel_var[2],
5e-7, 5e-7, 2e-6, 5e-4]
R = [default_gps_var[0], default_gps_var[0], default_gps_var[2],
default_gps_var[1], default_gps_var[1], default_gps_var[2],
default_mag_var[0], default_mag_var[1], default_mag_var[2], default_baro_var]
if mag_var is not None:
self.R[6,6] = mag_var[0]
self.R[7,7] = mag_var[1]
self.R[8,8] = mag_var[2]
if gyro_var is not None:
self.Q[0,0] = gyro_var[0]
self.Q[1,1] = gyro_var[1]
self.Q[2,2] = gyro_var[2]
if accel_var is not None:
self.Q[3,3] = accel_var[0]
self.Q[4,4] = accel_var[1]
self.Q[5,5] = accel_var[2]
if baro_var is not None:
self.R[9,9] = baro_var
if gps_var is not None:
self.R[0,0] = gps_var[0]
self.R[1,1] = gps_var[0]
self.R[3,3] = gps_var[1]
self.R[4,4] = gps_var[1]
self.R[2,2] = gps_var[2]
self.R[5,5] = gps_var[2]
def predict(self, gyros, accels, dT = 1.0/666.0):
""" Perform the prediction step
"""
U = numpy.concatenate((gyros, accels))
# fourth-order Runge-Kutta state prediction
k1 = self.l_Xd(self.r_X.tolist(), U)
k2 = self.l_Xd((self.r_X + 0.5*dT*k1).tolist(), U)
k3 = self.l_Xd((self.r_X + 0.5*dT*k2).tolist(), U)
k4 = self.l_Xd((self.r_X + dT*k3).tolist() , U)
d = (k1 + 2*k2 + 2*k3 + k4) / 6
f = self.l_F(self.r_X.tolist(), U)
g = self.l_G(self.r_X.T.tolist())
self.r_X = self.r_X + dT * d
P = self.r_P
#self.r_P = P + dT * (numpy.f*P + P*f.T) + (dT**2) * g * diag(self.Q) * g.T
#self.r_P = (eye(NUMX)+F*T)*Pplus*(eye(NUMX)+F*T)' + T^2*G*diag(Q)*G'
I = numpy.matrix(numpy.identity(14))
self.r_P = (I + f * dT) * P * (I + f * dT).T + (dT**2) * g * diag(self.Q) * g.T
self.normalize()
self.state[0:14] = self.r_X[0:14].T
self.state[-1] = self.r_X[-1]
def correction(self, pos=None, vel=None, mag=None, baro=None):
""" Perform the INS correction based on the provided corrections
"""
P = self.r_P
Y = self.l_Y([self.r_X.tolist()])
H = self.l_H([self.r_X.tolist()])
idx = []
Z = []
if pos is not None:
idx.extend((0,1))
Z.extend([[pos[0]]])
Z.extend([[pos[1]]])
if vel is not None:
idx.extend((3,4,5))
Z.extend([[vel[0]]])
Z.extend([[vel[1]]])
Z.extend([[vel[2]]])
if mag is not None:
if self.MAG_HEADING:
# Remove the influence of attitude
q0,q1,q2,q3 = self.r_X[6:10]
k1 = power( (q0*q1*2.0 + q2*q3*2.0)**2 + (q0*q0-q1*q1-q2*q2+q3*q3)**2, -0.5)
k2 = sqrt(1.0 - (q0*q2*2.0 - q1*q3*2.0)**2)
Rbh = numpy.zeros((3,3))
Rbh[0,0] = k2
Rbh[0,2] = q0*q2*-2.0+q1*q3*2.0
Rbh[1,0] = k1*(q0*q1*2.0+q2*q3*2.0)*(q0*q2*2.0-q1*q3*2.0)
Rbh[1,1] = k1*(q0*q0-q1*q1-q2*q2+q3*q3)
Rbh[1,2] = k1*sqrt(-power(q0*q2*2.0-q1*q3*2.0, 2.0)+1.0)*(q0*q1*2.0+q2*q3*2.0)
Rbh[2,0] = k1*(q0*q2*2.0-q1*q3*2.0)*(q0*q0-q1*q1-q2*q2+q3*q3)
Rbh[2,1] = -k1*(q0*q1*2.0+q2*q3*2.0)
Rbh[2,2] = k1*k2*(q0*q0-q1*q1-q2*q2+q3*q3)
print "Here: " + `Rbh.shape` + " " + `mag.shape`
print `Rbh.dot(mag).shape`
mag = Rbh.dot(mag)
Z.extend([[mag[0]],[mag[1]]])
else:
# Use full mag shape
Z.extend(mag[0:2])
idx.extend((6,7))
if baro is not None:
idx.append(9)
Z.append(baro)
# construct appropriately sized predictions based on provided inputs
# this method of creating a matrix from a list is really ugly and
# sensitive to how the elements are formatted in the list to create
# the correct shape of the matrix
Z = numpy.matrix(Z)
Y = Y[idx]
H = H[idx,:]
R = self.R[idx,:][:,idx]
# calculate Kalman gain matrix
# K = P*H.T/(H*P*H.T + R);
A = numpy.matrix(P*H.T)
B = numpy.matrix(H*P*H.T + R)
K = numpy.linalg.lstsq(B, A.T)[0].T
self.normalize()
self.r_X = self.r_X + K*(Z-Y)
self.r_P = P - K*H*P;
self.state[0:14] = self.r_X[0:14].T
self.state[-1] = self.r_X[-1]
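# Minimal usage sketch (added commentary; it mirrors what test() below does, and
# names like imu_samples and height are hypothetical):
#
# ins = PyINS()
# ins.prepare() # compile the lambdified F, G, H, Y and initialize P, Q, R
# for gyros, accels in imu_samples: # 3-element gyro and accel readings
# ins.predict(gyros, accels, dT=1.0/666.0)
# ins.correction(baro=[height]) # any subset of pos/vel/mag/baro may be supplied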
def test():
""" test the INS with simulated data
"""
from numpy import cos, sin
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2,2)
ins = PyINS()
ins.prepare()
dT = 1.0 / 666.0
STEPS = 100000
history = numpy.zeros((STEPS,14))
times = numpy.zeros((STEPS,1))
ins.r_X[6:10] = numpy.matrix([1/sqrt(2),0,0,1/sqrt(2)]).T
for k in range(STEPS):
ROLL = 0.5
YAW = 0.5
ins.predict([0,0,YAW], [0,PyINS.GRAV*sin(ROLL),-PyINS.GRAV*cos(ROLL) - 0.1], dT=0.0015)
history[k,:] = ins.r_X.T
times[k] = k * dT
angle = numpy.pi/3 + YAW * dT * k # radians
height = 1.0 * k * dT
if True and k % 60 == 59:
ins.correction(pos=[[10],[5],[-height]])
if True and k % 60 == 59:
ins.correction(vel=[[0],[0],[0]])
if k % 20 == 8:
ins.correction(baro=[height])
if True and k % 20 == 15:
ins.correction(mag=[[400 * cos(angle)], [-400 * sin(angle)], [1600]])
ins.normalize()
if k < 200:
ins.suppress_bias()
if k % 50 == 0:
print `k` + " Att: " + `quat_rpy_display(ins.r_X[6:10])` + " norm: " + `Matrix(ins.r_X[6:10]).norm()`
ax[0][0].cla()
ax[0][0].plot(times[0:k:4],history[0:k:4,0:3])
ax[0][1].cla()
ax[0][1].plot(times[0:k:4],history[0:k:4,3:6])
ax[1][0].cla()
ax[1][0].plot(times[0:k:4],history[0:k:4,6:10])
ax[1][1].cla()
ax[1][1].plot(times[0:k:4],history[0:k:4,10:])
#print `ins.r_P`
#ax[0][2].imshow(ins.r_P)
plt.draw()
fig.show()
if __name__ =='__main__':
test()
| gpl-3.0 |
saketkc/statsmodels | statsmodels/examples/ex_generic_mle_tdist.py | 29 | 39735 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 08:28:04 2010
Author: josef-pktd
"""
from __future__ import print_function
from statsmodels.compat.python import zip
import numpy as np
from scipy import stats, special, optimize
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
def maxabs(arr1, arr2):
return np.max(np.abs(arr1 - arr2))
def maxabsrel(arr1, arr2):
return np.max(np.abs(arr2 / arr1 - 1))
#global
store_params = []
class MyT(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Linear Model with t-distributed errors
This is an example for generic MLE which has the same
statistical model as discretemod.Poisson.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
# copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
Negative loglikelihood of the linear model with t-distributed errors
Parameters
----------
params : array-like
The parameters of the model: regression coefficients, followed by the
degrees of freedom and the scale of the t distribution.
Returns
-------
The negative log likelihood of each observation evaluated at `params`
Notes
--------
.. math :: -\\ln L_{i}=-\\ln\\Gamma\\left(\\frac{df+1}{2}\\right)+\\ln\\Gamma\\left(\\frac{df}{2}\\right)+\\frac{1}{2}\\ln\\left(df\\pi\\right)+\\frac{df+1}{2}\\ln\\left(1+\\frac{z_{i}^{2}}{df}\\right)+\\ln\\left(scale\\right),\\quad z_{i}=\\frac{y_{i}-x_{i}^{\\prime}\\beta}{scale}
"""
#print len(params),
store_params.append(params)
if not self.fixed_params is None:
#print 'using fixed'
params = self.expandparams(params)
beta = params[:-2]
df = params[-2]
scale = params[-1]
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx
#Example:
np.random.seed(98765678)
nobs = 1000
nvars = 6
df = 5
rvs = np.random.randn(nobs, nvars-1)
data_exog = sm.add_constant(rvs, prepend=False)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(df, size=nobs)
print(data_endog.var())
res_ols = sm.OLS(data_endog, data_exog).fit()
print(res_ols.scale)
print(np.sqrt(res_ols.scale))
print(res_ols.params)
kurt = stats.kurtosis(res_ols.resid)
df_fromkurt = 6./kurt + 4
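# (Added note: for a t distribution with df > 4 the excess kurtosis is 6/(df - 4),
# so inverting the sample excess kurtosis gives the rough starting value df = 6/kurt + 4.)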
print(stats.t.stats(df_fromkurt, moments='mvsk'))
print(stats.t.stats(df, moments='mvsk'))
modp = MyT(data_endog, data_exog)
start_value = 0.1*np.ones(data_exog.shape[1]+2)
#start_value = np.zeros(data_exog.shape[1]+2)
#start_value[:nvars] = sm.OLS(data_endog, data_exog).fit().params
start_value[:nvars] = res_ols.params
start_value[-2] = df_fromkurt #10
start_value[-1] = np.sqrt(res_ols.scale) #0.5
modp.start_params = start_value
#adding fixed parameters
fixdf = np.nan * np.zeros(modp.start_params.shape)
fixdf[-2] = 100
fixone = 0
if fixone:
modp.fixed_params = fixdf
modp.fixed_paramsmask = np.isnan(fixdf)
modp.start_params = modp.start_params[modp.fixed_paramsmask]
else:
modp.fixed_params = None
modp.fixed_paramsmask = None
resp = modp.fit(start_params = modp.start_params, disp=1, method='nm')#'newton')
#resp = modp.fit(start_params = modp.start_params, disp=1, method='newton')
print('\nestimation results t-dist')
print(resp.params)
print(resp.bse)
resp2 = modp.fit(start_params = resp.params, method='Newton')
print('using Newton')
print(resp2.params)
print(resp2.bse)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(modp.start_params, modp.loglike, epsilon=-1e-4)
tmp = modp.loglike(modp.start_params)
print(tmp.shape)
#np.linalg.eigh(np.linalg.inv(hb))[0]
pp=np.array(store_params)
print(pp.min(0))
print(pp.max(0))
##################### Example: Pareto
# estimating scale doesn't work yet, a bug somewhere ?
# fit_ks works well, but no bse or other result statistics yet
#import for kstest based estimation
#should be replace
import statsmodels.sandbox.distributions.sppatch
class MyPareto(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of the Pareto distribution
first version: iid case, with constant parameters
'''
#copied from stats.distribution
def pdf(self, x, b):
return b * x**(-b-1)
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
def nloglikeobs(self, params):
#print params.shape
if not self.fixed_params is None:
#print 'using fixed'
params = self.expandparams(params)
b = params[0]
loc = params[1]
scale = params[2]
#loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
logpdf = np_log(b) - (b+1.)*np_log(x) #use np_log(1 + x) for Pareto II
logpdf -= np.log(scale)
#lb = loc + scale
#logpdf[endog<lb] = -inf
#import pdb; pdb.set_trace()
logpdf[x<1] = -10000 #-np.inf
return -logpdf
def fit_ks(self):
'''fit Pareto with nested optimization
originally published on stackoverflow
this doesn't trim lower values during ks optimization
'''
rvs = self.endog
rvsmin = rvs.min()
fixdf = np.nan * np.ones(3)
self.fixed_params = fixdf
self.fixed_paramsmask = np.isnan(fixdf)
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
#est = self.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
self.fixed_params[1] = loc
est = self.fit(start_params=self.start_params[self.fixed_paramsmask]).params
#est = self.fit(start_params=self.start_params, method='nm').params
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
est = stats.pareto.fit_fr(rvs, 0., frozen=[np.nan, locest, np.nan])
args = (est[0], locest[0], est[1])
return args
def fit_ks1_trim(self):
'''fit Pareto with nested optimization
originally published on stackoverflow
'''
self.nobs = self.endog.shape[0]
rvs = np.sort(self.endog)
rvsmin = rvs.min()
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
est = stats.pareto.fit_fr(rvs, frozen=[np.nan, loc, np.nan])
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
#locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
maxind = min(np.floor(self.nobs*0.95).astype(int), self.nobs-10)
res = []
for trimidx in range(self.nobs//2, maxind):
xmin = loc = rvs[trimidx]
res.append([trimidx, pareto_ks(loc-1e-10, rvs[trimidx:])])
res = np.array(res)
bestidx = res[np.argmin(res[:,1]),0].astype(int)
print(bestidx)
locest = rvs[bestidx]
est = stats.pareto.fit_fr(rvs[bestidx:], 1., frozen=[np.nan, locest, np.nan])
args = (est[0], locest, est[1])
return args
def fit_ks1(self):
'''fit Pareto with nested optimization
originally published on stackoverflow
'''
rvs = self.endog
rvsmin = rvs.min()
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
#locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, locest, np.nan])
args = (est[0], locest[0], est[1])
return args
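# Added note on the likelihood above: writing z = (y - loc)/scale, nloglikeobs uses
# log f = log(b) - (b + 1)*log(z) - log(scale) for z >= 1, and observations with
# z < 1 get a large finite penalty (-10000) instead of -inf so the optimizers can
# still move through infeasible regions.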
#y = stats.pareto.rvs(1, loc=10, scale=2, size=nobs)
y = stats.pareto.rvs(1, loc=0, scale=2, size=nobs)
par_start_params = np.array([1., 9., 2.])
mod_par = MyPareto(y)
mod_par.start_params = np.array([1., 10., 2.])
mod_par.start_params = np.array([1., -9., 2.])
mod_par.fixed_params = None
fixdf = np.nan * np.ones(mod_par.start_params.shape)
fixdf[1] = 9.9
#fixdf[2] = 2.
fixone = 0
if fixone:
mod_par.fixed_params = fixdf
mod_par.fixed_paramsmask = np.isnan(fixdf)
mod_par.start_params = mod_par.start_params[mod_par.fixed_paramsmask]
mod_par.df_model = 2
mod_par.df_resid = mod_par.endog.shape[0] - mod_par.df_model
mod_par.data.xnames = ['shape', 'scale']
else:
mod_par.fixed_params = None
mod_par.fixed_paramsmask = None
mod_par.df_model = 3
mod_par.df_resid = mod_par.endog.shape[0] - mod_par.df_model
mod_par.data.xnames = ['shape', 'loc', 'scale']
res_par = mod_par.fit(start_params=mod_par.start_params, method='nm', maxfun=10000, maxiter=5000)
#res_par2 = mod_par.fit(start_params=res_par.params, method='newton', maxfun=10000, maxiter=5000)
res_parks = mod_par.fit_ks1()
print(res_par.params)
#print res_par2.params
print(res_parks)
print(res_par.params[1:].sum(), sum(res_parks[1:]), mod_par.endog.min())
#start new model, so we don't get two result instances with the same model instance
mod_par = MyPareto(y)
mod_par.fixed_params = fixdf
mod_par.fixed_paramsmask = np.isnan(fixdf)
mod_par.df_model = mod_par.fixed_paramsmask.sum()
mod_par.df_resid = mod_par.endog.shape[0] - mod_par.df_model
#mod_par.data.xnames = np.array(['shape', 'loc', 'scale'])[mod_par.fixed_paramsmask].tolist() # works also
mod_par.data.xnames = [name for (name, incl) in zip(['shape', 'loc', 'scale'], mod_par.fixed_paramsmask) if incl]
res_par3 = mod_par.start_params = par_start_params[mod_par.fixed_paramsmask]
res5 = mod_par.fit(start_params=mod_par.start_params)
##res_parks2 = mod_par.fit_ks()
##
##res_parkst = mod_par.fit_ks1_trim()
##print res_parkst
print(res5.summary())
print(res5.t_test([[1,0]]))
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
0.0686702747648
0.0164150896481
0.128121386381
[ 0.10370428 0.09921315 0.09676723 0.10457413 0.10201618 0.89964496]
(array(0.0), array(1.4552599885729831), array(0.0), array(2.5072143354058238))
(array(0.0), array(1.6666666666666667), array(0.0), array(6.0))
repr(start_params) array([ 0.10370428, 0.09921315, 0.09676723, 0.10457413, 0.10201618,
0.89964496, 6.39309417, 0.12812139])
Optimization terminated successfully.
Current function value: -679.951339
Iterations: 398
Function evaluations: 609
estimation results t-dist
[ 0.10400826 0.10111893 0.09725133 0.10507788 0.10086163 0.8996041
4.72131318 0.09825355]
[ 0.00365493 0.00356149 0.00349329 0.00362333 0.003732 0.00362716
0.7232824 0.00388829]
repr(start_params) array([ 0.10400826, 0.10111893, 0.09725133, 0.10507788, 0.10086163,
0.8996041 , 4.72131318, 0.09825355])
Optimization terminated successfully.
Current function value: -679.950443
Iterations 3
using Newton
[ 0.10395383 0.10106762 0.09720665 0.10503384 0.10080599 0.89954546
4.70918964 0.09815885]
[ 0.00365299 0.00355968 0.00349147 0.00362166 0.00373015 0.00362533
0.72014031 0.00388434]
()
[ 0.09992709 0.09786601 0.09387356 0.10229919 0.09756623 0.85466272
4.60459182 0.09661986]
[ 0.11308292 0.10828401 0.1028508 0.11268895 0.10934726 0.94462721
7.15412655 0.13452746]
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
>>> res_par.params
array([ 7.42705803e+152, 2.17339053e+153])
>>> mod_par.loglike(mod_p.start_params)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'mod_p' is not defined
>>> mod_par.loglike(mod_par.start_params)
-1085.1993430947232
>>> np.log(mod_par.pdf(mod_par.start_params))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: pdf() takes exactly 3 arguments (2 given)
>>> np.log(mod_par.pdf(*mod_par.start_params))
0.69314718055994529
>>> mod_par.loglike(*mod_par.start_params)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: loglike() takes exactly 2 arguments (3 given)
>>> mod_par.loglike(mod_par.start_params)
-1085.1993430947232
>>> np.log(stats.pareto.pdf(y[0],*mod_par.start_params))
-4.6414308627431353
>>> mod_par.loglike(mod_par.start_params)
-1085.1993430947232
>>> mod_par.nloglikeobs(mod_par.start_params)[0]
0.29377232943845044
>>> mod_par.start_params
array([ 1., 2.])
>>> np.log(stats.pareto.pdf(y[0],1,9.5,2))
-1.2806918394368461
>>> mod_par.fixed_params= None
>>> mod_par.nloglikeobs(np.array([1., 10., 2.]))[0]
0.087533156771285828
>>> y[0]
12.182956907488885
>>> mod_para.endog[0]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'mod_para' is not defined
>>> mod_par.endog[0]
12.182956907488885
>>> np.log(stats.pareto.pdf(y[0],1,10,2))
-0.86821349410251702
>>> np.log(stats.pareto.pdf(y[0],1.,10.,2.))
-0.86821349410251702
>>> stats.pareto.pdf(y[0],1.,10.,2.)
0.41970067762301644
>>> mod_par.loglikeobs(np.array([1., 10., 2.]))[0]
-0.087533156771285828
>>>
'''
'''
>>> mod_par.nloglikeobs(np.array([1., 10., 2.]))[0]
0.86821349410251691
>>> np.log(stats.pareto.pdf(y,1.,10.,2.)).sum()
-2627.9403758026938
'''
#'''
#C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
# please delete it from your matplotlibrc file
# warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
#0.0686702747648
#0.0164150896481
#0.128121386381
#[ 0.10370428 0.09921315 0.09676723 0.10457413 0.10201618 0.89964496]
#(array(0.0), array(1.4552599885729827), array(0.0), array(2.5072143354058203))
#(array(0.0), array(1.6666666666666667), array(0.0), array(6.0))
#repr(start_params) array([ 0.10370428, 0.09921315, 0.09676723, 0.10457413, 0.10201618,
# 0.89964496, 6.39309417, 0.12812139])
#Optimization terminated successfully.
# Current function value: -679.951339
# Iterations: 398
# Function evaluations: 609
#
#estimation results t-dist
#[ 0.10400826 0.10111893 0.09725133 0.10507788 0.10086163 0.8996041
# 4.72131318 0.09825355]
#[ 0.00365493 0.00356149 0.00349329 0.00362333 0.003732 0.00362716
# 0.72325227 0.00388822]
#repr(start_params) array([ 0.10400826, 0.10111893, 0.09725133, 0.10507788, 0.10086163,
# 0.8996041 , 4.72131318, 0.09825355])
#Optimization terminated successfully.
# Current function value: -679.950443
# Iterations 3
#using Newton
#[ 0.10395383 0.10106762 0.09720665 0.10503384 0.10080599 0.89954546
# 4.70918964 0.09815885]
#[ 0.00365299 0.00355968 0.00349147 0.00362166 0.00373015 0.00362533
# 0.72014669 0.00388436]
#()
#[ 0.09992709 0.09786601 0.09387356 0.10229919 0.09756623 0.85466272
# 4.60459182 0.09661986]
#[ 0.11308292 0.10828401 0.1028508 0.11268895 0.10934726 0.94462721
# 7.15412655 0.13452746]
#repr(start_params) array([ 1., 2.])
#Warning: Maximum number of function evaluations has been exceeded.
#repr(start_params) array([ 3.06504406e+302, 3.29325579e+303])
#Traceback (most recent call last):
# File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\examples\ex_generic_mle_tdist.py", line 222, in <module>
# res_par2 = mod_par.fit(start_params=res_par.params, method='newton', maxfun=10000, maxiter=5000)
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 547, in fit
# disp=disp, callback=callback, **kwargs)
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 262, in fit
# newparams = oldparams - np.dot(np.linalg.inv(H),
# File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 423, in inv
# return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 306, in solve
# raise LinAlgError, 'Singular matrix'
#numpy.linalg.linalg.LinAlgError: Singular matrix
#
#>>> mod_par.fixed_params
#array([ NaN, 10., NaN])
#>>> mod_par.start_params
#array([ 1., 2.])
#>>> np.source(stats.pareto.fit_fr)
#In file: c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\distributions_patch.py
#
#def fit_fr(self, data, *args, **kwds):
# '''estimate distribution parameters by MLE taking some parameters as fixed
#
# Parameters
# ----------
# data : array, 1d
# data for which the distribution parameters are estimated,
# args : list ? check
# starting values for optimization
# kwds :
#
# - 'frozen' : array_like
# values for frozen distribution parameters and, for elements with
# np.nan, the corresponding parameter will be estimated
#
# Returns
# -------
# argest : array
# estimated parameters
#
#
# Examples
# --------
# generate random sample
# >>> np.random.seed(12345)
# >>> x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=200)
#
# estimate all parameters
# >>> stats.gamma.fit(x)
# array([ 2.0243194 , 0.20395655, 1.44411371])
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, np.nan, np.nan])
# array([ 2.0243194 , 0.20395655, 1.44411371])
#
# keep loc fixed, estimate shape and scale parameters
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, np.nan])
# array([ 2.45603985, 1.27333105])
#
# keep loc and scale fixed, estimate shape parameter
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
# array([ 3.00048828])
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.2])
# array([ 2.57792969])
#
# estimate only scale parameter for fixed shape and loc
# >>> stats.gamma.fit_fr(x, frozen=[2.5, 0.0, np.nan])
# array([ 1.25087891])
#
# Notes
# -----
# self is an instance of a distribution class. This can be attached to
# scipy.stats.distributions.rv_continuous
#
# *Todo*
#
# * check if docstring is correct
# * more input checking, args is list ? might also apply to current fit method
#
# '''
# loc0, scale0 = map(kwds.get, ['loc', 'scale'],[0.0, 1.0])
# Narg = len(args)
#
# if Narg == 0 and hasattr(self, '_fitstart'):
# x0 = self._fitstart(data)
# elif Narg > self.numargs:
# raise ValueError("Too many input arguments.")
# else:
# args += (1.0,)*(self.numargs-Narg)
# # location and scale are at the end
# x0 = args + (loc0, scale0)
#
# if 'frozen' in kwds:
# frmask = np.array(kwds['frozen'])
# if len(frmask) != self.numargs+2:
# raise ValueError("Incorrect number of frozen arguments.")
# else:
# # keep starting values for not frozen parameters
# x0 = np.array(x0)[np.isnan(frmask)]
# else:
# frmask = None
#
# #print x0
# #print frmask
# return optimize.fmin(self.nnlf_fr, x0,
# args=(np.ravel(data), frmask), disp=0)
#
#>>> stats.pareto.fit_fr(y, 1., frozen=[np.nan, loc, np.nan])
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'loc' is not defined
#
#>>> stats.pareto.fit_fr(y, 1., frozen=[np.nan, 10., np.nan])
#array([ 1.0346268 , 2.00184808])
#>>> stats.pareto.fit_fr(y, (1.,2), frozen=[np.nan, 10., np.nan])
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\distributions_patch.py", line 273, in fit_fr
# x0 = np.array(x0)[np.isnan(frmask)]
#ValueError: setting an array element with a sequence.
#
#>>> stats.pareto.fit_fr(y, [1.,2], frozen=[np.nan, 10., np.nan])
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\distributions_patch.py", line 273, in fit_fr
# x0 = np.array(x0)[np.isnan(frmask)]
#ValueError: setting an array element with a sequence.
#
#>>> stats.pareto.fit_fr(y, frozen=[np.nan, 10., np.nan])
#array([ 1.03463526, 2.00184809])
#>>> stats.pareto.pdf(y, 1.03463526, 10, 2.00184809).sum()
#173.33947284555239
#>>> mod_par(1.03463526, 10, 2.00184809)
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#TypeError: 'MyPareto' object is not callable
#
#>>> mod_par.loglike(1.03463526, 10, 2.00184809)
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#TypeError: loglike() takes exactly 2 arguments (4 given)
#
#>>> mod_par.loglike((1.03463526, 10, 2.00184809))
#-962.21623668859741
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 10, 2.00184809)).sum()
#-inf
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 9, 2.00184809)).sum()
#-3074.5947476137271
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 10., 2.00184809)).sum()
#-inf
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 9.9, 2.00184809)).sum()
#-2677.3867091635661
#>>> y.min()
#12.001848089426717
#>>> np.log(stats.pareto.pdf(y, 1.03463526, loc=9.9, scale=2.00184809)).sum()
#-2677.3867091635661
#>>> np.log(stats.pareto.pdf(y, 1.03463526, loc=10., scale=2.00184809)).sum()
#-inf
#>>> stats.pareto.logpdf(y, 1.03463526, loc=10., scale=2.00184809).sum()
#-inf
#>>> stats.pareto.logpdf(y, 1.03463526, loc=9.99, scale=2.00184809).sum()
#-2631.6120098202355
#>>> mod_par.loglike((1.03463526, 9.99, 2.00184809))
#-963.2513896113644
#>>> maxabs(y, mod_par.endog)
#0.0
#>>> np.source(stats.pareto.logpdf)
#In file: C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6579.win32\Programs\Python25\Lib\site-packages\scipy\stats\distributions.py
#
# def logpdf(self, x, *args, **kwds):
# """
# Log of the probability density function at x of the given RV.
#
# This uses more numerically accurate calculation if available.
#
# Parameters
# ----------
# x : array-like
# quantiles
# arg1, arg2, arg3,... : array-like
# The shape parameter(s) for the distribution (see docstring of the
# instance object for more information)
# loc : array-like, optional
# location parameter (default=0)
# scale : array-like, optional
# scale parameter (default=1)
#
# Returns
# -------
# logpdf : array-like
# Log of the probability density function evaluated at x
#
# """
# loc,scale=map(kwds.get,['loc','scale'])
# args, loc, scale = self._fix_loc_scale(args, loc, scale)
# x,loc,scale = map(arr,(x,loc,scale))
# args = tuple(lmap(arr,args))
# x = arr((x-loc)*1.0/scale)
# cond0 = self._argcheck(*args) & (scale > 0)
# cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
# cond = cond0 & cond1
# output = empty(shape(cond),'d')
# output.fill(NINF)
# putmask(output,(1-cond0)*array(cond1,bool),self.badvalue)
# goodargs = argsreduce(cond, *((x,)+args+(scale,)))
# scale, goodargs = goodargs[-1], goodargs[:-1]
# place(output,cond,self._logpdf(*goodargs) - log(scale))
# if output.ndim == 0:
# return output[()]
# return output
#
#>>> np.source(stats.pareto._logpdf)
#In file: C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6579.win32\Programs\Python25\Lib\site-packages\scipy\stats\distributions.py
#
# def _logpdf(self, x, *args):
# return log(self._pdf(x, *args))
#
#>>> np.source(stats.pareto._pdf)
#In file: C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6579.win32\Programs\Python25\Lib\site-packages\scipy\stats\distributions.py
#
# def _pdf(self, x, b):
# return b * x**(-b-1)
#
#>>> stats.pareto.a
#1.0
#>>> (1-loc)/scale
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'loc' is not defined
#
#>>> b, loc, scale = (1.03463526, 9.99, 2.00184809)
#>>> (1-loc)/scale
#-4.4908502522786327
#>>> (x-loc)/scale == 1
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'x' is not defined
#
#>>> (lb-loc)/scale == 1
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'lb' is not defined
#
#>>> lb = scale + loc
#>>> lb
#11.991848090000001
#>>> (lb-loc)/scale == 1
#False
#>>> (lb-loc)/scale
#1.0000000000000004
#>>>
#'''
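# A small, uncalled helper summarizing the boundary issue explored in the
# session above (this reading of the -inf results is an inference from the
# numbers shown, not something stated explicitly in the transcript): since
# stats.pareto.a == 1, the support of pareto(b, loc=loc, scale=scale) is
# x >= loc + scale, so freezing loc at 10. puts loc + scale = 12.00184809
# just above y.min() = 12.001848089426717, the smallest observation gets
# pdf == 0, and the summed logpdf is -inf; with loc = 9.9 or 9.99 the bound
# drops below y.min() and the log-likelihood is finite.
def _pareto_support_ok(y_min=12.001848089426717, loc=10., scale=2.00184809):
    # True when the smallest observation lies inside the Pareto support.
    return y_min >= loc + scale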
'''
repr(start_params) array([ 1., 10., 2.])
Optimization terminated successfully.
Current function value: 2626.436870
Iterations: 102
Function evaluations: 210
Optimization terminated successfully.
Current function value: 0.016555
Iterations: 16
Function evaluations: 35
[ 1.03482659 10.00737039 1.9944777 ]
(1.0596088578825995, 9.9043376069230007, 2.0975104813987118)
>>> 9.9043376069230007 + 2.0975104813987118
12.001848088321712
>>> y.min()
12.001848089426717
'''
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
0.0686702747648
0.0164150896481
0.128121386381
[ 0.10370428 0.09921315 0.09676723 0.10457413 0.10201618 0.89964496]
(array(0.0), array(1.4552599885729829), array(0.0), array(2.5072143354058221))
(array(0.0), array(1.6666666666666667), array(0.0), array(6.0))
repr(start_params) array([ 0.10370428, 0.09921315, 0.09676723, 0.10457413, 0.10201618,
0.89964496, 6.39309417, 0.12812139])
Optimization terminated successfully.
Current function value: -679.951339
Iterations: 398
Function evaluations: 609
estimation results t-dist
[ 0.10400826 0.10111893 0.09725133 0.10507788 0.10086163 0.8996041
4.72131318 0.09825355]
[ 0.00365493 0.00356149 0.00349329 0.00362333 0.003732 0.00362716
0.72329352 0.00388832]
repr(start_params) array([ 0.10400826, 0.10111893, 0.09725133, 0.10507788, 0.10086163,
0.8996041 , 4.72131318, 0.09825355])
Optimization terminated successfully.
Current function value: -679.950443
Iterations 3
using Newton
[ 0.10395383 0.10106762 0.09720665 0.10503384 0.10080599 0.89954546
4.70918964 0.09815885]
[ 0.00365299 0.00355968 0.00349147 0.00362166 0.00373015 0.00362533
0.7201488 0.00388437]
()
[ 0.09992709 0.09786601 0.09387356 0.10229919 0.09756623 0.85466272
4.60459182 0.09661986]
[ 0.11308292 0.10828401 0.1028508 0.11268895 0.10934726 0.94462721
7.15412655 0.13452746]
repr(start_params) array([ 1., 9., 2.])
Optimization terminated successfully.
Current function value: 2636.129089
Iterations: 147
Function evaluations: 279
Optimization terminated successfully.
Current function value: 0.016555
Iterations: 16
Function evaluations: 35
[ 0.84856418 10.2197801 1.78206799]
(1.0596088578825995, 9.9043376069230007, 2.0975104813987118)
12.0018480891 12.0018480883 12.0018480894
repr(start_params) array([ 1., 2.])
Warning: Desired error not necessarily achieveddue to precision loss
Current function value: 2643.549907
Iterations: 2
Function evaluations: 13
Gradient evaluations: 12
>>> res_parks2 = mod_par.fit_ks()
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2642.465273
Iterations: 92
Function evaluations: 172
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2636.639863
Iterations: 73
Function evaluations: 136
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2631.568778
Iterations: 75
Function evaluations: 133
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2627.821044
Iterations: 75
Function evaluations: 135
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2631.568778
Iterations: 75
Function evaluations: 133
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.431596
Iterations: 58
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.737426
Iterations: 60
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2627.821044
Iterations: 75
Function evaluations: 135
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.471666
Iterations: 48
Function evaluations: 94
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2627.196314
Iterations: 66
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.578538
Iterations: 56
Function evaluations: 103
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.471666
Iterations: 48
Function evaluations: 94
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.651702
Iterations: 67
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.737426
Iterations: 60
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.613505
Iterations: 73
Function evaluations: 141
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.578538
Iterations: 56
Function evaluations: 103
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.651702
Iterations: 67
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.622789
Iterations: 63
Function evaluations: 114
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.613505
Iterations: 73
Function evaluations: 141
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.627465
Iterations: 59
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.625104
Iterations: 59
Function evaluations: 108
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629829
Iterations: 66
Function evaluations: 118
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.628642
Iterations: 67
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.631023
Iterations: 68
Function evaluations: 129
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630430
Iterations: 57
Function evaluations: 108
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629598
Iterations: 60
Function evaluations: 112
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630430
Iterations: 57
Function evaluations: 108
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630130
Iterations: 65
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629536
Iterations: 62
Function evaluations: 111
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630130
Iterations: 65
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629984
Iterations: 67
Function evaluations: 123
Optimization terminated successfully.
Current function value: 0.016560
Iterations: 18
Function evaluations: 38
>>> res_parks2
(1.0592352626264809, 9.9051580457572399, 2.0966900385041591)
>>> res_parks
(1.0596088578825995, 9.9043376069230007, 2.0975104813987118)
>>> res_par.params
array([ 0.84856418, 10.2197801 , 1.78206799])
>>> np.sqrt(np.diag(mod_par.hessian(res_par.params)))
array([ NaN, NaN, NaN])
>>> mod_par.hessian(res_par.params
... )
array([[ NaN, NaN, NaN],
[ NaN, NaN, NaN],
[ NaN, NaN, NaN]])
>>> mod_par.hessian(res_parks)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 533, in hessian
return approx_hess(params, self.loglike)[0] #need options for hess (epsilon)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\numdiff.py", line 118, in approx_hess
xh = x + h
TypeError: can only concatenate tuple (not "float") to tuple
>>> mod_par.hessian(np.array(res_parks))
array([[ NaN, NaN, NaN],
[ NaN, NaN, NaN],
[ NaN, NaN, NaN]])
>>> mod_par.fixed_params
array([ NaN, 9.90510677, NaN])
>>> mod_par.fixed_params=None
>>> mod_par.hessian(np.array(res_parks))
array([[-890.48553491, NaN, NaN],
[ NaN, NaN, NaN],
[ NaN, NaN, NaN]])
>>> mod_par.loglike(np.array(res_parks))
-2626.6322080820569
>>> mod_par.bsejac
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py", line 85, in __get__
_cachedval = self.fget(obj)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 592, in bsejac
return np.sqrt(np.diag(self.covjac))
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py", line 85, in __get__
_cachedval = self.fget(obj)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 574, in covjac
jacv = self.jacv
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py", line 85, in __get__
_cachedval = self.fget(obj)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 557, in jacv
return self.jac(self._results.params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 530, in jac
return approx_fprime1(params, self.loglikeobs, **kwds)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\numdiff.py", line 80, in approx_fprime1
f0 = f(*((xk,)+args))
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 522, in loglikeobs
return -self.nloglikeobs(params)
File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\examples\ex_generic_mle_tdist.py", line 184, in nloglikeobs
scale = params[2]
IndexError: index out of bounds
>>> hasattr(self, 'start_params')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> hasattr(mod_par, 'start_params')
True
>>> mod_par.start_params
array([ 1., 2.])
>>> stats.pareto.stats(1., 9., 2., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(1., 8., 2., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(1., 8., 1., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(1., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(0.5., moments='mvsk')
File "<stdin>", line 1
stats.pareto.stats(0.5., moments='mvsk')
^
SyntaxError: invalid syntax
>>> stats.pareto.stats(0.5, moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(2, moments='mvsk')
(array(2.0), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(10, moments='mvsk')
(array(1.1111111111111112), array(0.015432098765432098), array(2.8110568859997356), array(14.828571428571429))
>>> stats.pareto.rvs(10, size=10)
array([ 1.07716265, 1.18977526, 1.07093 , 1.05157081, 1.15991232,
1.31015589, 1.06675107, 1.08082475, 1.19501243, 1.34967158])
>>> r = stats.pareto.rvs(10, size=1000)
>>> plt
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'plt' is not defined
>>> import matplotlib.pyplot as plt
>>> plt.hist(r)
(array([962, 32, 3, 2, 0, 0, 0, 0, 0, 1]), array([ 1.00013046, 1.3968991 , 1.79366773, 2.19043637, 2.587205 ,
2.98397364, 3.38074227, 3.77751091, 4.17427955, 4.57104818,
4.96781682]), <a list of 10 Patch objects>)
>>> plt.show()
'''
| bsd-3-clause |
quheng/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
    # Make sure that the first element of Yt is ~1; this means
    # the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that changing n_components after fit raises an error in partial_fit.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features raises an error in partial_fit.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
fako/datascope | src/future_fashion/management/commands/match_image_colors.py | 1 | 6593 | import os
import shutil
import logging
import json
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from colorz import order_by_hue
from django.core.files.storage import default_storage
from core.management.commands import CommunityCommand
from core.utils.configuration import DecodeConfigAction
from sources.models import ImageDownload
from future_fashion.colors import (extract_dominant_colors, get_vector_from_colors, get_colors_frame,
get_colors_individual)
log = logging.getLogger("datascope")
class Command(CommunityCommand):
"""
Example: ./manage.py match_image_colors ClothingDataCommunity -i ~/Downloads/fairy-tale.jpg -a tagged_kleding
"""
def add_arguments(self, parser):
parser.add_argument('community', type=str, nargs="?", default=self.community_model)
parser.add_argument('-a', '--args', type=str, nargs="*", default="")
parser.add_argument('-c', '--config', type=str, action=DecodeConfigAction, nargs="?", default={})
parser.add_argument('-i', '--image', type=str)
parser.add_argument('-n', '--number-colors', type=int, default=3)
parser.add_argument('-s', '--similarity', action='store_true')
def handle_inventory_matches(self, matches, destination):
for ix, match_info in enumerate(matches):
similarity, match = match_info
name, ext = os.path.splitext(match["path"])
shutil.copy2(
match["path"],
os.path.join(destination, str(ix) + "-" + str(round(similarity, ndigits=3)) + ext)
)
def handle_data_matches(self, matches, destination):
for ix, match_info in enumerate(matches):
similarity, match = match_info
uri = ImageDownload.uri_from_url(match["image"])
try:
download = ImageDownload.objects.get(uri=uri)
except ImageDownload.DoesNotExist:
continue
if not download.success:
continue
name, ext = os.path.splitext(download.body)
shutil.copy2(
os.path.join(default_storage.location, download.body),
os.path.join(destination, str(ix) + "-" + str(round(similarity, ndigits=3)) + ext)
)
def get_similarity_matches(self, colors, content, num_colors):
colors = order_by_hue(colors)
vector = get_vector_from_colors(colors)
colors_frame = get_colors_frame(content, num_colors=num_colors, by_hue=True)
log.info("Color frame shape: {}".format(colors_frame.shape))
similarity = cosine_similarity(colors_frame, np.array(vector).reshape(1, -1)).flatten()
# Find indices for ten most similar objects and sort by most similar
indices = np.argsort(similarity)[-10:]
matches = [(similarity[ix], content[ix],) for ix in indices]
matches.reverse()
return matches
def get_prominent_matches(self, colors, content, num_colors):
vector = get_vector_from_colors(colors)
colors_frame = get_colors_frame(content, num_colors=num_colors)
log.info("Color frame shape: {}".format(colors_frame.shape))
for num in range(0, num_colors):
color_vector = vector[num:num+3]
color_columns = colors_frame.columns[num:num+3]
color_similarity = cosine_similarity(colors_frame.loc[:,color_columns], np.array(color_vector).reshape(1, -1)).flatten()
indices = np.argsort(color_similarity)
cut_ix = next((num for num, ix in enumerate(indices[::-1]) if color_similarity[ix] < 0.99), None)
if cut_ix is None:
log.info("Terminating match at color: {}".format(num))
break
colors_frame = colors_frame.iloc[indices[-1 * cut_ix:]]
else:
log.info("Taking all {} colors into account".format(num_colors))
indices = list(colors_frame.index.values)
matches = [(prio, content[ix],) for prio, ix in enumerate(indices)]
matches.reverse()
return matches
def handle_community(self, community, *args, **options):
# Read from options
num_colors = options["number_colors"]
image = options["image"]
similarity = options["similarity"]
# Get colors from input file
main_colors, balance = extract_dominant_colors(image, num=num_colors)
# Get colors from community data
# This loads all data into memory
content = list(community.kernel.content)
if similarity:
matches = self.get_similarity_matches(main_colors, content, num_colors)
else:
matches = self.get_prominent_matches(main_colors, content, num_colors)
# Create directory for input and copy matches there
basename = os.path.basename(image)
name, ext = os.path.splitext(basename)
dest = os.path.join(default_storage.location, community.get_name(), "colorz", name)
if not os.path.exists(dest):
os.makedirs(dest, exist_ok=True)
shutil.copy2(image, os.path.join(dest, basename))
color_data = {
"input": {
"colors": [
"#{0:02x}{1:02x}{2:02x}".format(color[0], color[1], color[2])
for color in main_colors
],
"links": [
"http://www.color-hex.com/color/{0:02x}{1:02x}{2:02x}".format(color[0], color[1], color[2])
for color in main_colors
]
},
"output": [
{
"similarity": round(similarity, ndigits=3),
"colors": [
"#{0:02x}{1:02x}{2:02x}".format(color[0], color[1], color[2])
for color in get_colors_individual(match, num_colors=num_colors, space="rgb")
],
"links": [
"http://www.color-hex.com/color/{0:02x}{1:02x}{2:02x}".format(color[0], color[1], color[2])
for color in get_colors_individual(match, num_colors=num_colors, space="rgb")
]
}
for similarity, match in matches
]
}
with open(os.path.join(dest, "colors.js"), "w") as jf:
json.dump(color_data, jf, indent=4)
if community.get_name() == "fashion_data":
self.handle_data_matches(matches, dest)
else:
self.handle_inventory_matches(matches, dest)
| gpl-3.0 |
CopyChat/Plotting | Python/climate_change/swio_changes_pr_rcp85.py | 1 | 11377 | #!/usr/bin/env python
########################################
#Global map for tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
########################## for CMIP5 charactors
DIR='/Users/tang/climate/CMIP5/monthly/pr/'
VARIABLE='pr'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
#AbsTemp=273.15
AbsTemp=0
RefTemp=0.0000001
# RefTemp is only an initial placeholder; it is recomputed for each model below
CRUmean=8.148 #1900-2100 land
TargetModel=[\
#'CanESM2',\
#'BCC-CSM1.1',\
#'CCSM4',\
#'CNRM-CM5',\
#'CSIRO-Mk3.6.0',\
#'EC-EARTH',\
#'GFDL-ESM2G',\
#'GFDL-ESM2M',\
#'GISS-E2-H',\
#'GISS-E2-R',\
#'HadGEM2-CC',\
'HadGEM2-ES',\
#'INM-CM4',\
#'IPSL-CM5A-LR',\
#'IPSL-CM5A-MR',\
#'MIROC-ESM-CHEM',\
#'MIROC-ESM',\
#'MIROC5',\
#'MPI-ESM-LR',\
#'MRI-CGCM3',\
'NorESM1-M',\
]
COLORtar=['darkred','blue','deeppink','orange',\
'orangered','yellow','gold','brown','chocolate',\
'green','yellowgreen','aqua','olive','teal',\
'blue','purple','darkmagenta','fuchsia','indigo',\
'dimgray','black','navy']
COLORall=['darkred','darkblue','darkgreen','deeppink',\
'red','blue','green','pink','gold',\
'lime','lightcyan','orchid','yellow','lightsalmon',\
'brown','khaki','aquamarine','yellowgreen','blueviolet',\
'snow','skyblue','slateblue','orangered','dimgray',\
'chocolate','teal','mediumvioletred','gray','cadetblue',\
'mediumorchid','bisque','tomato','hotpink','firebrick',\
'Chartreuse','purple','goldenrod',\
'black','orangered','cyan','magenta']
linestyles=['_', '_', '_', '-', '-',\
'-', '--','--','--', '--',\
'_', '_','_','_',\
'_', '_','_','_',\
'_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':']
#================================================ CMIP5 models
# for rcp8.5
# ls -l | awk '{printf "999%s998,\\\n",$NF}' | sort -n
modelist2=[\
'ACCESS1-0',\
'ACCESS1-3',\
'BNU-ESM',\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CMCC-CM',\
'CMCC-CMS',\
'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'CanESM2',\
'EC-EARTH',\
'FIO-ESM',\
'GFDL-CM3',\
'GFDL-ESM2G',\
'GFDL-ESM2M',\
'GISS-E2-R',\
'HadGEM2-AO',\
'HadGEM2-CC',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'IPSL-CM5B-LR',\
'MIROC-ESM-CHEM',\
'MIROC-ESM',\
'MIROC5',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'MRI-CGCM3',\
'NorESM1-M',\
'NorESM1-ME',\
'bcc-csm1-1-m',\
'bcc-csm1-1',\
'inmcm4',\
]
print "==============================================="
#=================================================== define the Plot:
fig1=plt.figure(figsize=(16,9))
ax = fig1.add_subplot(111)
plt.xlabel('Year',fontsize=16)
#plt.ylabel('SWIO Surface Downwelling Solar Radiation Change (W/m2)',fontsize=16)
#plt.ylabel('SWIO Changes ($^\circ$C)',fontsize=16)
plt.ylabel('SWIO Precipitation Changes (mm/day)',fontsize=16)
#plt.title("SWIO Surface Tempereture Changes simulated by CMIP5 models",fontsize=18)
plt.title("SWIO Precipitation Changes simulated by CMIP5 models",fontsize=18)
#plt.title('Global Surface Downwelling Solar Radiation Changes simulated by CMIP5 models (W/m2)',fontsize=18)
# vertical range ylim yrange
plt.ylim(-0.5,0.5)
plt.xlim(1961,2099)
plt.grid()
plt.xticks(np.arange(1960, 2090+10, 20))
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
#=================================================== 3 windows
plt.axvspan(1996, 2005, alpha=0.3, color='red')
plt.axvspan(2046, 2055, alpha=0.3, color='red')
plt.axvspan(2090, 2099, alpha=0.3, color='red')
#=================================================== 3 windows
plt.axvline(x=2005,linewidth=2, color='gray')
plt.axhline(y=0,linewidth=2, color='gray')
#plt.plot(x,y,color="blue",linewidth=4)
########################## for historical
########################## for historical
print "========== for rcp85 ==============="
EXPERIMENT='historical-rcp85'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
TIME='196101-209912'
filetag="swiomean"
YEAR=range(1961,2100)
Nmonth=1668
SumTemp=np.zeros(Nmonth/12)
K=0
for Model in modelist2:
#define the K-th model input file:
K=K+1 # for average
infile1=DIR+'rcp8.5'+'/'+Model+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+ENSEMBLE+'_'+TIME+'.'+filetag+'.nc'
#an example: tas_Amon_CanESM2_historical-rcp85_r1i1p1_200601-210012.globalmean.nc & \
#this file was copied locally for tests in this book
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
print 'the variable tas ===============: '
print TAS
# to change units to mm/day
TAS=TAS*86400
# calculate the annual mean temp:
TEMP=range(0,Nmonth,12)
    for j in range(0,Nmonth,12):
        TEMP[j/12]=np.mean(TAS[j:j+12,:,:])-AbsTemp   # mean over all 12 months of the year
    print " temp ======================== absolute"
print TEMP
# reference temp: mean of 1996-2005
RefTemp=np.mean(TEMP[len(TEMP)-94-10+1:len(TEMP)-94])
if Model=='HadGEM2-ES':
HadRefTemp=RefTemp
if Model=='GFDL-ESM2M':
GFDLRefTemp=RefTemp
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[t-RefTemp for t in TEMP]
    print " temp ======================== relative to mean of 1996-2005"
print TEMP
##quit()
# for std
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
#quit()
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,\
label=Model,\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[TargetModel.index(Model)],\
linewidth=2)
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
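# (for a roughly normal ensemble spread, the 5th and 95th percentiles sit at
#  about -/+1.645 standard deviations from the mean, which is where the 1.64
#  factor below comes from)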
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print modelist2
plt.plot(YEAR,AveTemp,label='ensemble mean',color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
# draw NO. of model used:
plt.text(2015,2,str(K)+' models',size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
#=================================================== put downscaling data:
# for Had
Had_dir1="/Users/tang/climate/Modeling/333/Had.G71E0001/output/pprcmdata/monthly/"
Had_dir2="/Users/tang/climate/Modeling/333/Had.G71E0001/output.RCP85.2044-2055/pprcmdata/monthly/"
Had_dir3="/Users/tang/climate/Modeling/333/Had.G71E0001/output.RCP85.2088-2100/pprcmdata/monthly/"
infile1=Had_dir1+'Had_hist.SRF.all.year.fldmean.1996-2005.nc'
infile2=Had_dir2+'Had_rcp85.SRF.all.year.fldmean.2046-2055.nc'
infile3=Had_dir3+'Had_rcp85.SRF.all.year.fldmean.2090-2099.nc'
YEAR1=range(1996,2006)
YEAR2=range(2046,2056)
YEAR3=range(2090,2100)
#open input files
infile01=IO.NetCDFFile(infile1,'r')
infile02=IO.NetCDFFile(infile2,'r')
infile03=IO.NetCDFFile(infile3,'r')
print infile01.variables.keys()
print infile02.variables.keys()
print infile03.variables.keys()
# read the variable tas
TAS1=infile01.variables[VARIABLE][:].copy()
TAS2=infile02.variables[VARIABLE][:].copy()
TAS3=infile03.variables[VARIABLE][:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
TEMP1=range(0,9)
TEMP2=range(0,9)
TEMP3=range(0,9)
TEMP1=TAS1[:,0,0]*86400 - AbsTemp - HadRefTemp
TEMP2=TAS2[:,0,0]*86400 - AbsTemp - HadRefTemp
TEMP3=TAS3[:,0,0]*86400 - AbsTemp - HadRefTemp
#print " temp ======================== absolut"
#print TEMP
plt.plot(YEAR1,TEMP1,color="blue",linewidth=4)
plt.plot(YEAR2,TEMP2,color="blue",linewidth=4)
plt.plot(YEAR3,TEMP3,color="blue",linewidth=4)
#===================================================
# for GFDL
GFDL_dir1="/Users/tang/climate/Modeling/333/GFDL.G71E0001/output/pprcmdata/monthly/"
GFDL_dir2="/Users/tang/climate/Modeling/333/GFDL.G71E0001/output.RCP85.2044-2055/pprcmdata/monthly/"
GFDL_dir3="/Users/tang/climate/Modeling/333/GFDL.G71E0001/output.RCP85.2088-2100/pprcmdata/monthly/"
infile1=GFDL_dir1+'GFDL_hist.SRF.all.year.fldmean.1996-2005.nc'
infile2=GFDL_dir2+'GFDL_rcp85.SRF.all.year.fldmean.2046-2055.nc'
infile3=GFDL_dir3+'GFDL_rcp85.SRF.all.year.fldmean.2090-2099.nc'
YEAR1=range(1996,2006)
YEAR2=range(2046,2055)
YEAR3=range(2090,2100)
#open input files
infile01=IO.NetCDFFile(infile1,'r')
infile02=IO.NetCDFFile(infile2,'r')
infile03=IO.NetCDFFile(infile3,'r')
print infile01.variables.keys()
print infile02.variables.keys()
print infile03.variables.keys()
# read the variable tas
TAS1=infile01.variables[VARIABLE][:].copy()
TAS2=infile02.variables[VARIABLE][:].copy()
TAS3=infile03.variables[VARIABLE][:].copy()
#print 'the variable tas ===============: '
#print TAS
print AbsTemp
# calculate the annual mean temp:
TEMP1=range(0,9)
TEMP2=range(0,9)
TEMP3=range(0,9)
TEMP1=TAS1[:,0,0]*86400 - AbsTemp - GFDLRefTemp
TEMP2=TAS2[:,0,0]*86400 - AbsTemp - GFDLRefTemp
TEMP3=TAS3[:,0,0]*86400 - AbsTemp - GFDLRefTemp
#print " temp ======================== absolut"
#print TEMP
plt.plot(YEAR1,TEMP1,color="darkred",linewidth=4)
print len(YEAR2)
print len(TEMP2)
plt.plot(YEAR2,TEMP2,color="darkred",linewidth=4)
plt.plot(YEAR3,TEMP3,color="darkred",linewidth=4)
print "==============================================="
print TEMP1
print TEMP2
print TEMP3
print GFDLRefTemp
plt.legend(loc=2)
plt.show()
quit()
| gpl-3.0 |
f3r/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 22 | 1848 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
The :ref:`decision trees <tree>`
are used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
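# A small, optional sketch (not part of the original example) of how max_depth
# could instead be chosen by cross-validation rather than by comparing the
# three fixed depths used below; it assumes a scikit-learn version that
# provides sklearn.model_selection, and nothing in this script calls it.
def _choose_max_depth_by_cv(X, y, depths=(2, 5, 8), cv=5):
    """Return the depth whose DecisionTreeRegressor scores best under CV."""
    import numpy as np
    from sklearn.model_selection import cross_val_score
    from sklearn.tree import DecisionTreeRegressor
    mean_scores = {
        depth: np.mean(cross_val_score(DecisionTreeRegressor(max_depth=depth),
                                       X, y, cv=cv))
        for depth in depths}
    return max(mean_scores, key=mean_scores.get)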
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
silky/sms-tools | software/models_interface/stochasticModel_function.py | 2 | 2017 | # function to call the main analysis/synthesis functions in software/models/stochasticModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import stochasticModel as STM
def main(inputFile='../../sounds/ocean.wav', H=256, N=512, stocf=.1):
"""
inputFile: input sound file (monophonic with sampling rate of 44100)
H: hop size, N: fft size
stocf: decimation factor used for the stochastic approximation (bigger than 0, maximum 1)
"""
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute stochastic model
stocEnv = STM.stochasticModelAnal(x, H, N, stocf)
# synthesize sound from stochastic model
y = STM.stochasticModelSynth(stocEnv, H, N)
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_stochasticModel.wav'
# write output sound
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot stochastic representation
plt.subplot(3,1,2)
numFrames = int(stocEnv[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(stocf*(N/2+1))*float(fs)/(stocf*N)
plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv))
plt.autoscale(tight=True)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.title('stochastic approximation')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.tight_layout()
plt.show(block=False)
if __name__ == "__main__":
main()
| agpl-3.0 |
kmiddleton/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chosen to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
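# A minimal sketch of the percentile-based lognormal fit described in the
# notes above; it mirrors what MakeFigures does below (median of log10 income
# for mu, inter-quartile range / 1.349 for sigma, before the hand-tuned
# std = 0.35 override).  The helper name is illustrative and nothing here
# calls it.
def _lognormal_params_from_cdf(cdf_log):
    """Estimate (mu, sigma) of a lognormal model from a CDF of log10 incomes."""
    mu = cdf_log.Percentile(50)     # median of log10 household income
    iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
    sigma = iqr / 1.349             # the IQR of a normal is about 1.349 sigma
    return mu, sigma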
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
glennq/scikit-learn | sklearn/model_selection/tests/test_validation.py | 3 | 32928 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.test_split import MockClassifier
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y2)
assert_array_equal(scores, clf.score(X, y2))
# test with multioutput y
multioutput_y = np.column_stack([y2, y2[::-1]])
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
scores = cross_val_score(clf, X_sparse, y2)
assert_array_equal(scores, clf.score(X_sparse, y2))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y2.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y2.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y2)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_val_score_predict_groups():
# Check if ValueError (when groups is None) propagates to cross_val_score
# and cross_val_predict
# And also check if groups is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
assert_raise_message(ValueError,
"The groups parameter should not be None",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The groups parameter should not be None",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
        # 3-fold cross-validation is used so we need at least 3 samples per class
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced, so f1_score should be equal to the
    # zero/one score)
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_group, _, pvalue_group = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_group, _, pvalue_group = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, groups=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
iris = load_iris()
X, y = iris.data, iris.target
X_sparse = coo_matrix(X)
multioutput_y = np.column_stack([y, y[::-1]])
clf = Ridge(fit_intercept=False, random_state=0)
    # 3 fold cv is used --> at least 3 samples per class
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_equal(predictions.shape, (150, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_array_equal(predictions.shape, (150, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X and y
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
for shuffle_train in [False, True]:
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
for shuffle_train in [False, True]:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10), shuffle=shuffle_train)
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_splits=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_with_shuffle():
    # The following test case was designed to verify the code changes made in
    # pull request #7506.
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [11, 12], [13, 14], [15, 16],
[17, 18], [19, 20], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18]])
y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4])
groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4])
# Splits on these groups fail without shuffle as the first iteration
# of the learning curve doesn't contain label 4 in the training set.
estimator = PassiveAggressiveClassifier(shuffle=False)
cv = GroupKFold(n_splits=2)
train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2)
assert_array_almost_equal(train_scores_batch.mean(axis=1),
np.array([0.75, 0.3, 0.36111111]))
assert_array_almost_equal(test_scores_batch.mean(axis=1),
np.array([0.36111111, 0.25, 0.25]))
assert_raises(ValueError, learning_curve, estimator, X, y, cv=cv, n_jobs=1,
train_sizes=np.linspace(0.3, 1.0, 3), groups=groups)
train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2,
exploit_incremental_learning=True)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_check_is_permutation():
rng = np.random.RandomState(0)
p = np.arange(100)
rng.shuffle(p)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
# Check if the additional duplicate indices are caught
assert_false(_check_is_permutation(np.hstack((p, 0)), 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
def test_cross_val_predict_with_method():
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=0)
classes = len(set(y))
kfold = KFold(len(iris.target))
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
est = LogisticRegression()
predictions = cross_val_predict(est, X, y, method=method)
assert_equal(len(predictions), len(y))
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
# Naive loop (should be same as cross_val_predict):
for train, test in kfold.split(X, y):
est.fit(X[train], y[train])
expected_predictions[test] = func(X[test])
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold)
assert_array_almost_equal(expected_predictions, predictions)
def test_score_memmap():
# Ensure a scalar score of memmap type is accepted
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
tf.write(b'Hello world!!!!!')
tf.close()
scores = np.memmap(tf.name, dtype=np.float64)
score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
try:
cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
# non-scalar should still fail
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=lambda est, X, y: scores)
finally:
# Best effort to release the mmap file handles before deleting the
# backing file under Windows
scores, score = None, None
for _ in range(3):
try:
os.unlink(tf.name)
break
except WindowsError:
sleep(1.)
| bsd-3-clause |
VirusTotal/msticpy | tests/test_cmd_line.py | 1 | 2043 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import pandas as pd
from pytest import raises
from msticpy.common.exceptions import MsticpyException
from msticpy.nbtools.entityschema import Host
from msticpy.sectools import cmd_line as cl
_test_data_folders = [
d for d, _, _ in os.walk(os.getcwd()) if d.endswith("/tests/testdata")
]
if len(_test_data_folders) == 1:
_TEST_DATA = _test_data_folders[0]
else:
_TEST_DATA = "./tests/testdata"
def test_risky_cmd_line():
input_file = os.path.join(_TEST_DATA, "sudo_data.csv")
input_df = pd.read_csv(input_file)
output = cl.risky_cmd_line(events=input_df, log_type="Syslog")
assert len(output) >= 1 # nosec
assert type(output) == dict # nosec
assert output["2019-07-05T18:19:52.873Z"] == "/bin/bash" # nosec
with raises(MsticpyException):
cl.risky_cmd_line(events=input_df, log_type="Syslog", cmd_field="Test")
def test_cmd_speed():
input_file = os.path.join(_TEST_DATA, "sudo_data_speed.csv")
input_df = pd.read_csv(input_file, parse_dates=["TimeGenerated"])
output = cl.cmd_speed(cmd_events=input_df, cmd_field="Command")
assert len(output) >= 1 # nosec
assert type(output[0]) == dict # nosec
with raises(MsticpyException):
output = cl.cmd_speed(cmd_events=input_df, cmd_field="Test")
def test_syslog_risky_actions():
input_file = os.path.join(_TEST_DATA, "syslog_data.csv")
input_df = pd.read_csv(input_file)
risky_stuff = os.path.join(_TEST_DATA, "risky_stuff_custom.json")
output = cl.risky_cmd_line(
events=input_df,
log_type="Syslog",
cmd_field="SyslogMessage",
detection_rules=risky_stuff,
)
assert len(output) >= 1 # nosec
assert type(output) == dict # nosec
| mit |
datapythonista/pandas | pandas/tests/series/methods/test_astype.py | 2 | 19480 | from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see GH#4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_float_to_period(self):
result = Series([np.nan]).astype("period[D]")
expected = Series([NaT], dtype="period[D]")
tm.assert_series_equal(result, expected)
def test_astype_no_pandas_dtype(self):
# https://github.com/pandas-dev/pandas/pull/24866
ser = Series([1, 2], dtype="int64")
# Don't have PandasDtype in the public API, so we use `.array.dtype`,
# which is a PandasDtype.
result = ser.astype(ser.array.dtype)
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
def test_astype_dt64_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Series(dti).astype(str)
expected = Series(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_dt64tz_to_str(self):
# GH#10442 : testing astype(str) is correct for Series/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Series(dti_tz).astype(str)
expected = Series(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_series_equal(result, expected)
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz astype deprecated
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see GH#9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series(["2010-01-04"])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series(["2010-01-04 00:00:00-05:00"])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see GH#9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series(["1 days"])
tm.assert_series_equal(ser, expected)
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize(
"data, dtype",
[
(["x", "y", "z"], "string"),
pytest.param(
["x", "y", "z"],
"arrow_string",
marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
),
(["x", "y", "z"], "category"),
(3 * [Timestamp("2020-01-01", tz="UTC")], None),
(3 * [Interval(0, 1)], None),
],
)
@pytest.mark.parametrize("errors", ["raise", "ignore"])
def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors):
# https://github.com/pandas-dev/pandas/issues/35471
from pandas.core.arrays.string_arrow import ArrowStringDtype # noqa: F401
ser = Series(data, dtype=dtype)
if errors == "ignore":
expected = ser
result = ser.astype(float, errors="ignore")
tm.assert_series_equal(result, expected)
else:
msg = "(Cannot cast)|(could not convert)"
with pytest.raises((ValueError, TypeError), match=msg):
ser.astype(float, errors=errors)
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
def test_astype_from_float_to_str(self, dtype):
# https://github.com/pandas-dev/pandas/issues/36451
s = Series([0.1], dtype=dtype)
result = s.astype(str)
expected = Series(["0.1"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"value, string_value",
[
(None, "None"),
(np.nan, "nan"),
(NA, "<NA>"),
],
)
def test_astype_to_str_preserves_na(self, value, string_value):
# https://github.com/pandas-dev/pandas/issues/36904
s = Series(["a", "b", value], dtype=object)
result = s.astype(str)
expected = Series(["a", "b", string_value], dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_unicode(self):
# see GH#7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series(["データーサイエンス、お前はもう死んでいる"]),
]
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series(["野菜食べないとやばい".encode()]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(str)
tm.assert_series_equal(res, expec)
# Restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
def test_astype_bytes(self):
# GH#39474
result = Series(["foo", "bar", "baz"]).astype(bytes)
assert result.dtypes == np.dtype("S3")
class TestAstypeString:
@pytest.mark.parametrize(
"data, dtype",
[
([True, NA], "boolean"),
(["A", NA], "category"),
(["2020-10-10", "2020-10-10"], "datetime64[ns]"),
(["2020-10-10", "2020-10-10", NaT], "datetime64[ns]"),
(
["2012-01-01 00:00:00-05:00", NaT],
"datetime64[ns, US/Eastern]",
),
([1, None], "UInt16"),
(["1/1/2021", "2/1/2021"], "period[M]"),
(["1/1/2021", "2/1/2021", NaT], "period[M]"),
(["1 Day", "59 Days", NaT], "timedelta64[ns]"),
# currently no way to parse IntervalArray from a list of strings
],
)
def test_astype_string_to_extension_dtype_roundtrip(self, data, dtype, request):
if dtype == "boolean" or (
dtype in ("period[M]", "datetime64[ns]", "timedelta64[ns]") and NaT in data
):
mark = pytest.mark.xfail(
reason="TODO StringArray.astype() with missing values #GH40566"
)
request.node.add_marker(mark)
# GH-40351
s = Series(data, dtype=dtype)
tm.assert_series_equal(s, s.astype("string").astype(dtype))
class TestAstypeCategorical:
def test_astype_categorical_to_other(self):
cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
ser = Series(np.random.RandomState(0).randint(0, 10000, 100)).sort_values()
ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
expected = ser
tm.assert_series_equal(ser.astype("category"), expected)
tm.assert_series_equal(ser.astype(CategoricalDtype()), expected)
msg = r"Cannot cast object dtype to float64"
with pytest.raises(ValueError, match=msg):
ser.astype("float64")
cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
tm.assert_series_equal(cat.astype("str"), exp)
s2 = Series(Categorical(["1", "2", "3", "4"]))
exp2 = Series([1, 2, 3, 4]).astype("int")
tm.assert_series_equal(s2.astype("int"), exp2)
        # object dtype doesn't sort correctly, so just compare that we have
        # the same values
def cmp(a, b):
tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(ser.values), name="value_group")
cmp(ser.astype("object"), expected)
cmp(ser.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(ser), np.array(ser.values))
tm.assert_series_equal(ser.astype("category"), ser)
tm.assert_series_equal(ser.astype(CategoricalDtype()), ser)
roundtrip_expected = ser.cat.set_categories(
ser.cat.categories.sort_values()
).cat.remove_unused_categories()
result = ser.astype("object").astype("category")
tm.assert_series_equal(result, roundtrip_expected)
result = ser.astype("object").astype(CategoricalDtype())
tm.assert_series_equal(result, roundtrip_expected)
def test_astype_categorical_invalid_conversions(self):
# invalid conversion (these are NOT a dtype)
cat = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
ser = Series(np.random.randint(0, 10000, 100)).sort_values()
ser = cut(ser, range(0, 10500, 500), right=False, labels=cat)
msg = (
"dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
"not understood"
)
with pytest.raises(TypeError, match=msg):
ser.astype(Categorical)
with pytest.raises(TypeError, match=msg):
ser.astype("object").astype(Categorical)
def test_astype_categoricaldtype(self):
s = Series(["a", "b", "a"])
result = s.astype(CategoricalDtype(["a", "b"], ordered=True))
expected = Series(Categorical(["a", "b", "a"], ordered=True))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b"], ordered=False))
expected = Series(Categorical(["a", "b", "a"], ordered=False))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b", "c"], ordered=False))
expected = Series(
Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)
)
tm.assert_series_equal(result, expected)
tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("series_ordered", [True, False])
def test_astype_categorical_to_categorical(
self, name, dtype_ordered, series_ordered
):
# GH#10696, GH#18593
s_data = list("abcaacbab")
s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)
s = Series(s_data, dtype=s_dtype, name=name)
# unspecified categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = s.astype(dtype)
exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)
expected = Series(s_data, name=name, dtype=exp_dtype)
tm.assert_series_equal(result, expected)
# different categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
result = s.astype(dtype)
expected = Series(s_data, name=name, dtype=dtype)
tm.assert_series_equal(result, expected)
if dtype_ordered is False:
# not specifying ordered, so only test once
expected = s
result = s.astype("category")
tm.assert_series_equal(result, expected)
def test_astype_bool_missing_to_categorical(self):
# GH-19182
s = Series([True, False, np.nan])
assert s.dtypes == np.object_
result = s.astype(CategoricalDtype(categories=[True, False]))
expected = Series(Categorical([True, False, np.nan], categories=[True, False]))
tm.assert_series_equal(result, expected)
def test_astype_categories_raises(self):
# deprecated GH#17636, removed in GH#27141
s = Series(["a", "b", "a"])
with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
@pytest.mark.parametrize("items", [["a", "b", "c", "a"], [1, 2, 3, 1]])
def test_astype_from_categorical(self, items):
ser = Series(items)
exp = Series(Categorical(items))
res = ser.astype("category")
tm.assert_series_equal(res, exp)
def test_astype_from_categorical_with_keywords(self):
# with keywords
lst = ["a", "b", "c", "a"]
ser = Series(lst)
exp = Series(Categorical(lst, ordered=True))
res = ser.astype(CategoricalDtype(None, ordered=True))
tm.assert_series_equal(res, exp)
exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True))
res = ser.astype(CategoricalDtype(list("abcdef"), ordered=True))
tm.assert_series_equal(res, exp)
| bsd-3-clause |
Linaf/Convae-Destin | scripts/convae_destin_7.py | 3 | 18178 | """Stacked fixed noise dConvAE test"""
"""
+ Using GCN with ZCA whitened data
+ Nesterov momentum with constant value=0.9
+ Decreasing noise levels, initialized within range (0.5, 0.9)
  and then decreased during training
+ variable learning rate
"""
import sys
sys.path.append("..")
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
import time
import theano
import theano.tensor as T
import scae_destin.datasets as ds
from scae_destin.fflayers import ReLULayer
from scae_destin.fflayers import SoftmaxLayer
from scae_destin.convnet import ReLUConvLayer
from scae_destin.convnet import SigmoidConvLayer
from scae_destin.model import ConvAutoEncoder
from scae_destin.convnet import MaxPoolingSameSize, MaxPooling
from scae_destin.convnet import Flattener
from scae_destin.model import FeedForward
from scae_destin.optimize import gd_updates
from scae_destin.cost import mean_square_cost
from scae_destin.cost import categorical_cross_entropy_cost
from scae_destin.cost import L2_regularization
start_time=time.time()
n_epochs=100;
batch_size=100;
nkerns=64;
Xtr, Ytr, Xte, Yte=ds.load_CIFAR10_Processed("../data/train.npy",
"../data/train.pkl",
"../data/test.npy",
"../data/test.pkl");
Xtr=Xtr.reshape(50000, 3, 32, 32).transpose(0, 2, 3, 1).mean(3);
Xte=Xte.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).mean(3);
Xtrain=Xtr.reshape(Xtr.shape[0], Xtr.shape[1]*Xtr.shape[2])
Xtest=Xte.reshape(Xte.shape[0], Xte.shape[1]*Xte.shape[2])
Ytr=Ytr.flatten()
Yte=Yte.flatten()
train_set_x, train_set_y=ds.shared_dataset((Xtrain, Ytr));
test_set_x, test_set_y=ds.shared_dataset((Xtest, Yte));
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;
print "[MESSAGE] The data is loaded"
################################## FIRST LAYER #######################################
X=T.matrix("data")
y=T.ivector("label")
idx=T.lscalar()
corruption_level=T.fscalar()
images=X.reshape((batch_size, 1, 32, 32))
layer_0_en=ReLUConvLayer(filter_size=(4,4),
num_filters=128,
num_channels=1,
fm_size=(32,32),
batch_size=batch_size,
border_mode="same")
layer_0_de=SigmoidConvLayer(filter_size=(4,4),
num_filters=1,
num_channels=128,
fm_size=(32,32),
batch_size=batch_size,
border_mode="same")
layer_1_en=ReLUConvLayer(filter_size=(2,2),
num_filters=128,
num_channels=128,
fm_size=(8,8),
batch_size=batch_size,
border_mode="same")
layer_1_de=SigmoidConvLayer(filter_size=(2,2),
num_filters=128,
num_channels=128,
fm_size=(8,8),
batch_size=batch_size,
border_mode="same")
layer_2_en=ReLUConvLayer(filter_size=(2,2),
num_filters=128,
num_channels=128,
fm_size=(4,4),
batch_size=batch_size,
border_mode="same")
layer_2_de=SigmoidConvLayer(filter_size=(2,2),
num_filters=128,
num_channels=128,
fm_size=(4,4),
batch_size=batch_size,
border_mode="same")
layer_3_en=ReLUConvLayer(filter_size=(2,2),
num_filters=128,
num_channels=128,
fm_size=(2,2),
batch_size=batch_size,
border_mode="same")
layer_3_de=SigmoidConvLayer(filter_size=(2,2),
num_filters=128,
num_channels=128,
fm_size=(2,2),
batch_size=batch_size,
border_mode="same")
layer_4_en=ReLUConvLayer(filter_size=(1,1),
num_filters=128,
num_channels=128,
fm_size=(1,1),
batch_size=batch_size,
border_mode="same")
layer_4_de=SigmoidConvLayer(filter_size=(1,1),
num_filters=128,
num_channels=128,
fm_size=(1,1),
batch_size=batch_size,
border_mode="same")
# layer_0
model_0=ConvAutoEncoder(layers=[layer_0_en, MaxPoolingSameSize(pool_size=(4,4)), layer_0_de])
out_0=model_0.fprop(images, corruption_level=corruption_level)
cost_0=mean_square_cost(out_0[-1], images)+L2_regularization(model_0.params, 0.005)
updates_0=gd_updates(cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.01, momentum=0.9, nesterov=True)
# layer_0 --> layer_1
model_0_to_1=FeedForward(layers=[layer_0_en, MaxPooling(pool_size=(4,4))]);
out_0_to_1=model_0_to_1.fprop(images);
# layer_1
model_1=ConvAutoEncoder(layers=[layer_1_en, MaxPoolingSameSize(pool_size=(2,2)), layer_1_de])
out_1=model_1.fprop(out_0_to_1[-1], corruption_level=corruption_level)
cost_1=mean_square_cost(out_1[-1], out_0_to_1[-1])+L2_regularization(model_1.params, 0.005)
updates_1=gd_updates(cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.01, momentum=0.9, nesterov=True)
# layer_1 --> layer_2
model_1_to_2=FeedForward(layers=[layer_1_en, MaxPooling(pool_size=(2,2))]);
out_1_to_2=model_1_to_2.fprop(images);
# layer_2
model_2=ConvAutoEncoder(layers=[layer_2_en, MaxPoolingSameSize(pool_size=(2,2)), layer_2_de])
out_2=model_2.fprop(out_1_to_2[-1], corruption_level=corruption_level)
cost_2=mean_square_cost(out_2[-1], out_1_to_2[-1])+L2_regularization(model_2.params, 0.005)
updates_2=gd_updates(cost=cost_2, params=model_2.params, method="sgd", learning_rate=0.01, momentum=0.9, nesterov=True)
# layer_2 --> layer_3
model_2_to_3=FeedForward(layers=[layer_2_en, MaxPooling(pool_size=(2,2))]);
out_2_to_3=model_2_to_3.fprop(images);
# layer_3
model_3=ConvAutoEncoder(layers=[layer_3_en, MaxPoolingSameSize(pool_size=(2,2)), layer_3_de])
out_3=model_3.fprop(out_2_to_3[-1], corruption_level=corruption_level)
cost_3=mean_square_cost(out_3[-1], out_2_to_3[-1])+L2_regularization(model_3.params, 0.005)
updates_3=gd_updates(cost=cost_3, params=model_3.params, method="sgd", learning_rate=0.01, momentum=0.9, nesterov=True)
# layer_3 --> layer_4
model_3_to_4=FeedForward(layers=[layer_3_en, MaxPooling(pool_size=(2,2))]);
out_3_to_4=model_3_to_4.fprop(images);
# layer_4
model_4=ConvAutoEncoder(layers=[layer_4_en, MaxPoolingSameSize(pool_size=(2,2)), layer_4_de])
out_4=model_4.fprop(out_3_to_4[-1], corruption_level=corruption_level)
cost_4=mean_square_cost(out_4[-1], out_3_to_4[-1])+L2_regularization(model_4.params, 0.005)
updates_4=gd_updates(cost=cost_4, params=model_4.params, method="sgd", learning_rate=0.01, momentum=0.9, nesterov=True)
train_0=theano.function(inputs=[idx, corruption_level],
outputs=[cost_0],
updates=updates_0,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
train_1=theano.function(inputs=[idx, corruption_level],
outputs=[cost_1],
updates=updates_1,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
train_2=theano.function(inputs=[idx, corruption_level],
outputs=[cost_2],
updates=updates_2,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
train_3=theano.function(inputs=[idx, corruption_level],
outputs=[cost_3],
updates=updates_3,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
train_4=theano.function(inputs=[idx, corruption_level],
outputs=[cost_4],
updates=updates_4,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
# learning_rate = theano.shared(np.asarray(initial_learning_rate,dtype=theano.config.floatX))
# decay_learning_rate = theano.function(inputs=[], outputs=learning_rate,
# updates={learning_rate: learning_rate * learning_rate_decay})
print "[MESSAGE] The 5-layer model is built"
corr={}
corr[0]=np.random.uniform(low=0.5, high=0.9, size=1).astype("float32")
corr[1]=np.random.uniform(low=0.5, high=0.9, size=1).astype("float32")
corr[2]=np.random.uniform(low=0.5, high=0.9, size=1).astype("float32")
corr[3]=np.random.uniform(low=0.5, high=0.9, size=1).astype("float32")
corr[4]=np.random.uniform(low=0.5, high=0.9, size=1).astype("float32")
min_cost={0:None,
1:None,
2:None,
3:None,
4:None}
corr_best={0:corr[0],
           1:corr[1],
           2:corr[2],
           3:corr[3],
           4:corr[4]}
max_iter={0:0,
1:0,
2:0,
3:0,
4:0}
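# Per-layer adaptive corruption schedule: min_cost tracks the best mean
# reconstruction cost seen so far, corr_best the corruption level that
# achieved it, and max_iter the number of epochs without a large improvement.
# When the cost halves (or 20 epochs pass), the candidate level is accepted
# and a new, slightly lower corruption level is sampled for that layer.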
n_epochs=200
epoch = 0
while (epoch < n_epochs):
epoch_time_start = time.time()
epoch = epoch + 1
if epoch == 50:
updates_4=gd_updates(cost=cost_4, params=model_4.params, method="sgd", learning_rate=0.001, momentum=0.9, nesterov=True)
updates_3=gd_updates(cost=cost_3, params=model_3.params, method="sgd", learning_rate=0.001, momentum=0.9, nesterov=True)
updates_2=gd_updates(cost=cost_2, params=model_2.params, method="sgd", learning_rate=0.001, momentum=0.9, nesterov=True)
updates_1=gd_updates(cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.001, momentum=0.9, nesterov=True)
updates_0=gd_updates(cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.001, momentum=0.9, nesterov=True)
if epoch == 100:
updates_4=gd_updates(cost=cost_4, params=model_4.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
updates_3=gd_updates(cost=cost_3, params=model_3.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
updates_2=gd_updates(cost=cost_2, params=model_2.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
updates_1=gd_updates(cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
updates_0=gd_updates(cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
if epoch == 150:
updates_4=gd_updates(cost=cost_4, params=model_4.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
updates_3=gd_updates(cost=cost_3, params=model_3.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
updates_2=gd_updates(cost=cost_2, params=model_2.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
updates_1=gd_updates(cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
updates_0=gd_updates(cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.0001, momentum=0.9, nesterov=True)
    # keep a separate cost list per layer (chained assignment would alias a single list)
    c_0, c_1, c_2, c_3, c_4 = [], [], [], [], []
for batch_index in xrange(n_train_batches):
for rep in xrange(8):
train_cost=train_4(batch_index, corr_best[4][0])
c_4.append(train_cost)
train_cost=train_3(batch_index, corr_best[3][0])
c_3.append(train_cost)
train_cost=train_2(batch_index, corr_best[2][0])
c_2.append(train_cost)
train_cost=train_1(batch_index, corr_best[1][0])
c_1.append(train_cost)
train_cost=train_0(batch_index, corr_best[0][0])
c_0.append(train_cost)
if min_cost[0]==None:
min_cost[0]=np.mean(c_0)
else:
if (np.mean(c_0)<min_cost[0]*0.5) or (max_iter[0]>=20):
min_cost[0]=np.mean(c_0)
corr_best[0][0]=corr[0]
corr[0]=np.random.uniform(low=corr_best[0][0], high=corr_best[0][0]-0.1, size=1).astype("float32")
max_iter[0]=0
else:
max_iter[0]+=1
if min_cost[1]==None:
min_cost[1]=np.mean(c_1)
else:
if (np.mean(c_1)<min_cost[1]*0.5) or (max_iter[1]>=20):
min_cost[1]=np.mean(c_1)
corr_best[1][0]=corr[1]
corr[1]=np.random.uniform(low=corr_best[1][0], high=corr_best[1][0]-0.1, size=1).astype("float32")
max_iter[1]=0
else:
max_iter[1]+=1
if min_cost[2]==None:
min_cost[2]=np.mean(c_2)
else:
if (np.mean(c_2)<min_cost[2]*0.5) or (max_iter[2]>=20):
min_cost[2]=np.mean(c_2)
corr_best[2][0]=corr[2]
corr[2]=np.random.uniform(low=corr_best[2][0], high=corr_best[2][0]-0.1, size=1).astype("float32")
max_iter[2]=0
else:
max_iter[2]+=1
if min_cost[3]==None:
min_cost[3]=np.mean(c_3)
else:
if (np.mean(c_3)<min_cost[3]*0.5) or (max_iter[3]>=20):
min_cost[3]=np.mean(c_3)
corr_best[3][0]=corr[3]
corr[3]=np.random.uniform(low=corr_best[3][0], high=corr_best[3][0]-0.1, size=1).astype("float32")
max_iter[3]=0
else:
max_iter[3]+=1
if min_cost[4]==None:
min_cost[4]=np.mean(c_4)
else:
if (np.mean(c_4)<min_cost[4]*0.5) or (max_iter[4]>=20):
min_cost[4]=np.mean(c_4)
corr_best[4][0]=corr[4]
corr[4]=np.random.uniform(low=corr_best[4][0], high=corr_best[4][0]-0.1, size=1).astype("float32")
max_iter[4]=0
else:
max_iter[4]+=1
print 'Training epoch %d, cost ' % epoch, np.mean(c_0), str(corr_best[0][0]), min_cost[0], max_iter[0]
print ' ', np.mean(c_1), str(corr_best[1][0]), min_cost[1], max_iter[1]
print ' ', np.mean(c_2), str(corr_best[2][0]), min_cost[2], max_iter[2]
print ' ', np.mean(c_3), str(corr_best[3][0]), min_cost[3], max_iter[3]
print ' ', np.mean(c_4), str(corr_best[4][0]), min_cost[4], max_iter[4]
print '[TIME TAKEN ] :', (time.time() - epoch_time_start)
print "[MESSAGE] The model is trained"
################################## BUILD SUPERVISED MODEL #######################################
pool_0=MaxPooling(pool_size=(4,4));
pool_1=MaxPooling(pool_size=(2,2));
pool_2=MaxPooling(pool_size=(2,2));
pool_3=MaxPooling(pool_size=(2,2));
flattener=Flattener()
layer_5=ReLULayer(in_dim=128*1*1,
out_dim=64)
layer_6=SoftmaxLayer(in_dim=64,
out_dim=10)
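# The supervised model reuses the pretrained encoder layers (with max-pooling
# between them), flattens the final 128x1x1 maps, and adds a 64-unit ReLU
# layer and a 10-way softmax; all parameters, including the pretrained
# encoders, are updated during supervised training.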
model_sup=FeedForward(layers=[layer_0_en, pool_0, layer_1_en, pool_1, layer_2_en, pool_2, layer_3_en, pool_3, layer_4_en,
flattener, layer_5, layer_6])
out_sup=model_sup.fprop(images)
cost_sup=categorical_cross_entropy_cost(out_sup[-1], y)
updates=gd_updates(cost=cost_sup, params=model_sup.params, method="sgd", learning_rate=0.1, momentum=0.9, nesterov=True)
train_sup=theano.function(inputs=[idx],
outputs=cost_sup,
updates=updates,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size],
y: train_set_y[idx * batch_size: (idx + 1) * batch_size]})
test_sup=theano.function(inputs=[idx],
outputs=model_sup.layers[-1].error(out_sup[-1], y),
givens={X: test_set_x[idx * batch_size: (idx + 1) * batch_size],
y: test_set_y[idx * batch_size: (idx + 1) * batch_size]})
print "[MESSAGE] The supervised model is built"
test_record=np.zeros((n_epochs, 1))
epoch = 0
while (epoch < n_epochs):
epoch+=1
for minibatch_index in xrange(n_train_batches):
mlp_minibatch_avg_cost = train_sup(minibatch_index)
iteration = (epoch - 1) * n_train_batches + minibatch_index
if (iteration + 1) % n_train_batches == 0:
print 'MLP MODEL'
test_losses = [test_sup(i) for i in xrange(n_test_batches)]
test_record[epoch-1] = np.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error %f %%') %
(epoch, minibatch_index + 1, n_train_batches, test_record[epoch-1] * 100.))
end_time=time.time()
print "\n---------------------------------------------"
print "Total time taken is : ", (end_time - start_time)/60, ' minutes'
filters=[]
filters.append(model_sup.layers[0].filters.get_value(borrow=True))
filters.append(model_sup.layers[2].filters.get_value(borrow=True))
filters.append(model_sup.layers[4].filters.get_value(borrow=True))
filters.append(model_sup.layers[6].filters.get_value(borrow=True))
filters.append(model_sup.layers[8].filters.get_value(borrow=True))
# filters=model_1.layers[0].filters.get_value(borrow=True);
pickle.dump(test_record, open("convae_destin_7.pkl", "w"))
for i in xrange(128):
for j in xrange(5):
if i > (len(filters[j]) -1):
continue
image_adr="/home/profloo/Documents/ConvAE-DeSTIN/scripts/plots/layer_%d_filter_%d.eps" % (j,i)
plt.imshow(filters[j][i, 0, :, :], cmap = plt.get_cmap('gray'), interpolation='nearest')
plt.axis('off')
plt.savefig(image_adr , bbox_inches='tight', pad_inches=0)
if i%10 == 0:
print 'completed saving filters till : ', i
| apache-2.0 |
saiwing-yeung/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 5 | 4635 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import calinski_harabaz_score
from sklearn.metrics import pairwise_distances
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
def test_non_encoded_labels():
dataset = datasets.load_iris()
X = dataset.data
labels = dataset.target
assert_equal(
silhouette_score(X, labels + 10), silhouette_score(X, labels))
def test_non_numpy_labels():
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
assert_equal(
silhouette_score(list(X), list(y)), silhouette_score(X, y))
def test_calinski_harabaz_score():
rng = np.random.RandomState(seed=0)
# Assert message when there is only one label
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.zeros(10))
# Assert message when all point are in different clusters
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.arange(10))
# Assert the value is 1. when all samples are equals
assert_equal(1., calinski_harabaz_score(np.ones((10, 2)),
[0] * 5 + [1] * 5))
# Assert the value is 0. when all the mean cluster are equal
assert_equal(0., calinski_harabaz_score([[-1, -1], [1, 1]] * 10,
[0] * 10 + [1] * 10))
# General case (with non numpy arrays)
X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
[[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
assert_almost_equal(calinski_harabaz_score(X, labels),
45 * (40 - 4) / (5 * (4 - 1)))
| bsd-3-clause |
binarybana/samcnet | samcnet/data.py | 1 | 16807 | import os
import sys
import tempfile
import subprocess as sb
import numpy as np
import pandas as pa
from os import path
from sklearn.feature_selection import SelectKBest, f_classif
import scipy.stats as st
import scipy.stats.distributions as di
from statsmodels.sandbox.distributions.mv_normal import MVT,MVNormal
from lori import sample_invwishart
param_template = """.d NoOfTrainSamples0 {Ntrn}
.d NoOfTrainSamples1 {Ntrn}
.d NoOfTestSamples0 {Ntst}
.d NoOfTestSamples1 {Ntst}
.d TotalFeatures {f_tot}
.d GlobalFeatures {f_glob}
.d HeteroSubTypes {subclasses}
.d HeteroFeaturesPerSubType {f_het}
.d RandomFeatures {f_rand}
.d CorrBlockSize {blocksize}
.d CorrType 1
.f Rho {rho}
.d ScrambleFlag 0
.f Mu_0 {mu0}
.f Mu_1 {mu1}
.f Sigma_0 {sigma0}
.f Sigma_1 {sigma1}
"""
def setv(p,s,d,conv=None):
if s not in p:
p[s] = d
return d
elif conv is not None:
return conv(p[s])
else:
        return p[s]
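# Minimal usage sketch for setv (illustrative; the dict below is hypothetical):
# it stores and returns the default when the key is missing, otherwise returns
# the existing value, converted when a converter is given.
#   >>> params = {'Ntrn': '20'}
#   >>> setv(params, 'Ntrn', 40, int)   # key present -> converted existing value
#   20
#   >>> setv(params, 'Ntst', 300, int)  # key missing -> default stored and returned
#   300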
def data_yj(params):
Ntrn = params['Ntrn']
Ntst = params['Ntst']
num_feat = params['num_feat']
lowd = params['lowd']
highd = params['highd']
seed = params['seed']
# Run Yousef/Jianping RNA Synthetic
currdir = path.abspath('.')
synloc = path.expanduser('~/GSP/research/samc/synthetic/rnaseq')
YJparams = param_template.format(**params)
try:
os.chdir(synloc)
fid,fname = tempfile.mkstemp(dir='params')
fname = path.basename(fname)
fid = os.fdopen(fid,'w')
fid.write(YJparams)
fid.close()
inspec = 'gen -i params/%s -c 0.05 -l %f -h %f -s %d' % \
(fname, lowd, highd, seed)
spec = path.join(synloc, inspec).split()
sb.check_call(spec)
except Exception as e:
print "ERROR in data_yj: " + str(e)
finally:
os.chdir(currdir)
try:
trn_path = path.join(synloc, 'out','%s_trn.txt'%fname)
tst_path = path.join(synloc, 'out','%s_tst.txt'%fname)
raw_trn_data = np.loadtxt(trn_path,
delimiter=',', skiprows=1)
selector = SelectKBest(f_classif, k=num_feat)
trn_labels = np.hstack(( np.zeros(Ntrn), np.ones(Ntrn) ))
selector.fit(raw_trn_data, trn_labels)
raw_tst_data = np.loadtxt(tst_path,
delimiter=',', skiprows=1)
except Exception as e:
print "ERROR in data_yj: " + str(e)
finally:
os.remove(trn_path)
os.remove(tst_path)
trn0, trn1, tst0, tst1 = gen_labels(Ntrn, Ntrn, Ntst, Ntst)
rawdata = np.vstack(( raw_trn_data, raw_tst_data ))
pvind = selector.pvalues_.argsort()
np.random.shuffle(pvind)
feats = np.zeros(rawdata.shape[1], dtype=bool)
feats[pvind[:num_feat]] = True
calib = ~feats
return rawdata, trn0, trn1, tst0, tst1, feats, calib
def gen_data(mu, cov, n, lowd, highd):
lams = MVNormal(mu, cov).rvs(n)
ps = np.empty_like(lams)
ps = di.poisson.rvs(di.uniform.rvs(lowd,highd-lowd,size=lams.shape) * np.exp(lams))
return ps
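# Illustrative call (hypothetical shapes, not taken from the original code):
#   mu = np.zeros(4); cov = np.eye(4)
#   counts = gen_data(mu, cov, 10, 9.0, 11.0)
# 'counts' is a (10, 4) array of Poisson draws whose rates are
# uniform(lowd, highd) sequencing-depth factors times exp(latent Gaussian),
# i.e. a rough Poisson-lognormal model of RNA-seq counts.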
def data_jk(params):
D = params['num_gen_feat']
num_feat = params['num_feat']
Ntrn = params['Ntrn']
Ntst = params['Ntst']
sigma0 = params['sigma0']
sigma1 = params['sigma1']
mu0 = params['mu0']
mu1 = params['mu1']
kappa = params['kappa']
seed = params['seed']
rseed = params['rseed']
lowd = params['lowd']
highd = params['highd']
np.random.seed(seed)
lmu0 = np.ones(D) * mu0
lmu1 = np.ones(D) * mu1
#rho0 = -0.4
#rho1 = 0.4
#cov0 = np.array([[1, rho0],[rho0, 1]])
#cov1 = np.array([[1, rho1],[rho1, 1]])
#cov0 = np.eye(D)
#cov1 = np.eye(D)
if kappa >= 1000:
cov0 = sample_invwishart(np.eye(D)*sigma0, D+2)
cov1 = sample_invwishart(np.eye(D)*sigma1, D+2)
cov0 = np.diag(np.diag(cov0))
cov1 = np.diag(np.diag(cov1))
else:
cov0 = sample_invwishart(np.eye(D)*(kappa-D-1)*sigma0, kappa)
cov1 = sample_invwishart(np.eye(D)*(kappa-D-1)*sigma1, kappa)
#v1,v2 = 0.2,1.3
#cov1 = cov0
#trn_data0 = np.vstack (( gen_data(np.array([v1, v2]),cov0,Ntrn/2),
#gen_data(np.array([v2,v1]),cov0,Ntrn/2) ))
#tst_data0 = np.vstack (( gen_data(np.array([v1, v2]),cov0,Ntst/2),
#gen_data(np.array([v2,v1]),cov0,Ntst/2) ))
trn_data0 = gen_data(lmu0,cov0,Ntrn,lowd,highd)
trn_data1 = gen_data(lmu1,cov1,Ntrn,lowd,highd)
tst_data0 = gen_data(lmu0,cov0,Ntst,lowd,highd)
tst_data1 = gen_data(lmu1,cov1,Ntst,lowd,highd)
rawdata = np.vstack(( trn_data0, trn_data1, tst_data0, tst_data1 ))
trn0, trn1, tst0, tst1 = gen_labels(Ntrn, Ntrn, Ntst, Ntst)
#selector = SelectKBest(f_classif, k=num_feat)
#trn_data = np.vstack(( trn_data0, trn_data1 ))
#trn_labels = np.hstack(( np.zeros(Ntrn), np.ones(Ntrn) ))
#selector.fit(trn_data, trn_labels)
#pvind = selector.pvalues_.argsort()
#np.random.shuffle(pvind)
feats = np.zeros(rawdata.shape[1], dtype=bool)
#feats[pvind[:num_feat]] = True
feats[:num_feat] = True
calib = ~feats
return rawdata, trn0, trn1, tst0, tst1, feats, calib
def data_tcga(params):
Ntrn = params['Ntrn']
num_feat = params['num_feat']
low = params['low_filter']
high = params['high_filter']
if 'c' in params:
#Interpret Ntrn as total training samples
Ntrn0 = int(round(params['c'] * Ntrn))
Ntrn1 = int(round((1-params['c']) * Ntrn))
else:
Ntrn0, Ntrn1 = Ntrn, Ntrn
print("Training data sizes: {}, {}".format(Ntrn0, Ntrn1))
store = pa.HDFStore(os.path.expanduser('~/largeresearch/seq-data/store.h5'))
luad = store['lusc_norm'].as_matrix()
lusc = store['luad_norm'].as_matrix()
# Grab random training set and use the rest as testing
luad_inds = np.arange(luad.shape[1])
lusc_inds = np.arange(lusc.shape[1])
np.random.shuffle(luad_inds)
np.random.shuffle(lusc_inds)
trn_data = np.round(np.hstack((
lusc[:,lusc_inds[:Ntrn0]],
luad[:,luad_inds[:Ntrn1]] )).T)
tst_data = np.round(np.hstack((
lusc[:,lusc_inds[Ntrn0:]],
luad[:,luad_inds[Ntrn1:]] )).T)
# Generate labels
trn_labels = np.hstack(( np.zeros(Ntrn0), np.ones(Ntrn1) ))
trn0, trn1, tst0, tst1 = gen_labels(Ntrn0, Ntrn1, lusc.shape[1]-Ntrn0, luad.shape[1]-Ntrn1)
# Select a subset of the features, then select a further subset based on
# Univariate F tests
good_cols = (trn_data.mean(axis=0) < high) & (trn_data.mean(axis=0) > low)
low_trn_data = trn_data[:, good_cols]
low_tst_data = tst_data[:, good_cols]
#selector = SelectKBest(f_classif, k=4)
#selector.fit(low_trn_data, trn_labels)
#pvind = selector.pvalues_.argsort()
##########
#tst_labels = np.hstack(( np.zeros(lusc_inds.size-Ntrn0), np.ones(luad_inds.size-Ntrn1) ))
#pvind2 = selector.pvalues_.argsort()
#selector2 = SelectKBest(f_classif, k=4)
#selector2.fit(low_tst_data, tst_labels)
#print low, high
#print trn_data.shape[1]
#print good_cols.sum()
#print selector.pvalues_[pvind[:10]]
#print selector.pvalues_[pvind[-10:]]
#print selector2.pvalues_[pvind2[:10]]
#print selector2.pvalues_[pvind2[-10:]]
###################
rawdata = np.vstack(( low_trn_data, low_tst_data ))
feats_ind = np.random.choice(np.arange(low_trn_data.shape[1]), num_feat, replace=False)
feats = np.zeros(rawdata.shape[1], dtype=bool)
feats[feats_ind] = True
#feats[:num_feat] = True
calib = ~feats
store.close()
return rawdata, trn0, trn1, tst0, tst1, feats, calib
def data_karen(params):
Ntrn = params['Ntrn']
num_feat = params['num_feat']
low = params['low_filter']
high = params['high_filter']
num_candidates = params['num_candidates']
cat = params['split_category']
use_candidates = params['use_candidates']
datapath = os.path.expanduser('~/GSP/research/samc/samcnet/data/')
store = pa.HDFStore(datapath+'karen-clean2.h5')
data = store['data']
store.close()
#num_cols = pa.Index(map(str.strip,open(datapath+'colon_rat.txt','r').readlines()))
num_cols = data.columns - pa.Index(['oil', 'fiber', 'treatment'])
numdata = data[num_cols]
cls0 = data.loc[:,cat].iloc[0]
aom = data[cat] == cls0
aom_inds = data.index[aom]
saline_inds = data.index - aom_inds
trn_inds = pa.Index(np.random.choice(aom_inds, Ntrn, replace=False)) \
+ pa.Index(np.random.choice(saline_inds, Ntrn, replace=False))
tst_inds = data.index - trn_inds
trn_labels = np.array((data.loc[trn_inds, cat]==cls0).astype(np.int64) * 1)
# Feature selection, first stage
good_cols = numdata.columns[(numdata.mean() <= high) & (numdata.mean() >= low)]
print("# Good columns: {}, # Total columns: {}".format(
len(good_cols), numdata.shape[1]))
# F Tests
selector = SelectKBest(f_classif, k=4)
selector.fit(numdata.loc[:, good_cols].as_matrix().astype(np.float), aom)
pvind = selector.pvalues_.argsort()
#print(selector.pvalues_[pvind2[:50]])
if not use_candidates:
np.random.shuffle(pvind)
rawdata = numdata[good_cols].as_matrix()
candidates = pvind[:num_candidates]
feats_ind = np.random.choice(candidates, num_feat, replace=False)
feats = np.zeros(rawdata.shape[1], dtype=bool)
feats[feats_ind] = True
calib = ~feats
# It's hideous!!!
# Wow... converting from indices/numeric locations to a boolean array
# has never looked so ugly... but it works for now.
trn0 = pa.Series(np.zeros(data.index.size), index=data.index, dtype=bool)
trn1 = pa.Series(np.zeros(data.index.size), index=data.index, dtype=bool)
tst0 = pa.Series(np.zeros(data.index.size), index=data.index, dtype=bool)
tst1 = pa.Series(np.zeros(data.index.size), index=data.index, dtype=bool)
trn0.loc[trn_inds & data.index[(data.loc[:, cat] == cls0)]] = True
trn1.loc[trn_inds & data.index[(data.loc[:, cat] != cls0)]] = True
tst0.loc[tst_inds & data.index[(data.loc[:, cat] == cls0)]] = True
tst1.loc[tst_inds & data.index[(data.loc[:, cat] != cls0)]] = True
trn0 = np.array(trn0)
trn1 = np.array(trn1)
tst0 = np.array(tst0)
tst1 = np.array(tst1)
return rawdata, trn0, trn1, tst0, tst1, feats, calib
def data_test(params):
trn_data = np.vstack(( np.zeros((10,2)), np.ones((10,2))+2 ))
trn_labels = np.hstack(( np.ones(10), np.zeros(10) ))
tst_data = np.vstack(( np.zeros((1000,2)), np.ones((1000,2)) ))
tst_labels = np.hstack(( np.ones(1000), np.zeros(1000) ))
return trn_data, trn_labels, tst_data, tst_labels
def gen_labels(a,b,c,d):
trn0 = np.hstack(( np.ones(a), np.zeros(b), np.zeros(c+d) )).astype(bool)
trn1 = np.hstack(( np.zeros(a), np.ones(b), np.zeros(c+d) )).astype(bool)
tst0 = np.hstack(( np.zeros(a+b), np.ones(c), np.zeros(d) )).astype(bool)
tst1 = np.hstack(( np.zeros(a+b), np.zeros(c), np.ones(d) )).astype(bool)
return trn0, trn1, tst0, tst1
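# Worked example (illustrative): gen_labels(2, 2, 3, 3) returns four boolean
# masks of length 10 over the row-stacked data,
#   trn0 = [T T . . . . . . . .]   training rows of class 0
#   trn1 = [. . T T . . . . . .]   training rows of class 1
#   tst0 = [. . . . T T T . . .]   test rows of class 0
#   tst1 = [. . . . . . . T T T]   test rows of class 1
# where '.' stands for False.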
def plot_p_hist(data, sel, label):
selector = SelectKBest(f_classif, k=4)
print sel['tst'].shape
print data.loc[sel['tst'],:].shape
selector.fit(data.loc[sel['tst'], :], sel['tstl'])
pvals = selector.pvalues_
pvind = pvals.argsort()
pvals[np.isnan(pvals)] = 1
import pylab as p
p.hist(pvals, bins=np.logspace(-5,0,30), label=label, log=True, histtype='step')
#def norm(data1, data2):
#mu = data1.mean(axis=0)
#std = np.sqrt(data1.var(axis=0, ddof=1))
#return (data1 - mu) / std, (data2 - mu) / std
#def split(data, labels):
#return data[labels==0,:], data[labels==1,:]
#def shuffle_features(trn, tst):
#D = trn.shape[1]
#assert D == tst.shape[1]
#ind = np.arange(D)
#np.random.shuffle(ind)
#return trn[:,ind], tst[:,ind]
def subsample(sel, N=1):
"""
Takes N samples from each class's training sample and moves them to holdout
"""
s0 = np.random.choice(sel['trn0'].nonzero()[0], size=N, replace=False)
s1 = np.random.choice(sel['trn1'].nonzero()[0], size=N, replace=False)
sel['trn0'][s0] = False
sel['trn'][s0] = False
sel['tst0'][s0] = True
sel['tst'][s0] = True
sel['trn1'][s1] = False
sel['trn'][s1] = False
sel['tst1'][s1] = True
sel['tst'][s1] = True
# FIXME: Hack
sel['trnl'] = (sel['trn0']*1 + sel['trn1']*2 - 1)[sel['trn']]
sel['tstl'] = (sel['tst0']*1 + sel['tst1']*2 - 1)[sel['tst']]
return sel
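# The "hack" label encoding used above (and again in get_data below) maps each
# selected row to 0 or 1: a class-0 row has trn0 == True and trn1 == False, so
# 1*1 + 0*2 - 1 = 0, while a class-1 row gives 0*1 + 1*2 - 1 = 1; indexing with
# [sel['trn']] (or [sel['tst']]) then keeps only the selected rows.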
def get_data(method, params):
"""
Returns a selector dictionary, rawdata matrix and normalized data matrix
where the selector dictionary has the following keys defined:
trn, trn0, trn1, tst, tst0, tst1, feats, calib
where the last two are for the features and calibration features
"""
rawdata, trn0, trn1, tst0, tst1, feats, calib = method(params)
trn = trn0 | trn1
tst = tst0 | tst1
# FIXME: Hack
trnl = (trn0*1 + trn1*2 - 1)[trn]
tstl = (tst0*1 + tst1*2 - 1)[tst]
num_calibs = setv(params, 'num_calibs', 5, int)
if num_calibs == 0 or params['num_gen_feat'] == params['num_feat']:
# This should really be done cleaner, but I can accrue a little
# technical debt every now and then right?... right?!
sel = dict(trn0=trn0, trn1=trn1, trn=trn, tst0=tst0, tst1=tst1, tst=tst,
tstl=tstl, trnl=trnl)
else:
size_calibs = setv(params, 'size_calibs', 2, int)
subcalibs = calib.nonzero()[0]
clip_calibs = subcalibs.size - (num_calibs * size_calibs)
assert clip_calibs >= 0, clip_calibs
np.random.shuffle(subcalibs)
subcalibs = np.split(subcalibs[:-clip_calibs], num_calibs)
sel = dict(trn0=trn0, trn1=trn1, trn=trn, tst0=tst0, tst1=tst1, tst=tst,
feats=feats, calib=calib,
tstl=tstl, trnl=trnl,
subcalibs=subcalibs)
# Normalize
mu = rawdata[trn,:].mean(axis=0)
std = np.sqrt(rawdata[trn,:].var(axis=0, ddof=1))
normdata = (rawdata - mu) / std
return sel, pa.DataFrame(rawdata), pa.DataFrame(normdata)
def setv(p,s,d,conv=None):
if s not in p:
p[s] = d
return d
elif conv is not None:
return conv(p[s])
else:
        return p[s]
def get_test_params():
params = {}
iters = setv(params, 'iters', int(1e4), int)
num_feat = setv(params, 'num_feat', 5, int)
num_gen_feat = setv(params, 'num_gen_feat', 30, int)
seed = setv(params, 'seed', np.random.randint(10**8), int)
rseed = setv(params, 'rseed', np.random.randint(10**8), int)
Ntrn = setv(params, 'Ntrn', 20, int)
Ntst = setv(params, 'Ntst', 300, int)
f_glob = setv(params, 'f_glob', 10, int)
subclasses = setv(params, 'subclasses', 2, int)
f_het = setv(params, 'f_het', 20, int)
f_rand = setv(params, 'f_rand', 20, int)
rho = setv(params, 'rho', 0.6, float)
f_tot = setv(params, 'f_tot', f_glob+f_het*subclasses+f_rand, int)
blocksize = setv(params, 'blocksize', 5, int)
mu0 = setv(params, 'mu0', 0.8, float)
mu1 = setv(params, 'mu1', -0.8, float)
sigma0 = setv(params, 'sigma0', 0.6, float)
sigma1 = setv(params, 'sigma1', 0.2, float)
kappa = setv(params, 'kappa', 32.0, float)
lowd = setv(params, 'lowd', 9.0, float)
highd = setv(params, 'highd', 11.0, float)
numlam = setv(params, 'numlam', 20, int)
low = setv(params, 'low_filter', 1, int)
high = setv(params, 'high_filter', 10, int)
num_candidates = setv(params, 'num_candidates', 50, int)
cat = setv(params, 'split_category', 'treatment', str)
use_candidates = setv(params, 'use_candidates', False, bool)
return params
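# Illustrative round trip (using the defaults set above): get_test_params()
# builds the params dict entirely through setv, so afterwards, e.g.,
# params['Ntrn'] == 20 and params['f_tot'] == 10 + 20 * 2 + 20 == 70.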
if __name__ == '__main__':
import pylab as p
p.figure()
params = get_test_params()
sel, raw, norm = get_data(data_tcga, params)
print(sel['tst'].sum(), sel['tst0'].sum(), sel['tst1'].sum())
sys.exit()
#plot_p_hist(raw, sel, 'tcga low')
sel, raw, norm = get_data(data_karen, params)
plot_p_hist(raw, sel, 'karen low')
params['low_filter'] = 0
params['high_filter'] = 10000
sel, raw, norm = get_data(data_karen, params)
plot_p_hist(raw, sel, 'karen all')
sel, raw, norm = get_data(data_jk, params)
plot_p_hist(raw, sel, 'ic')
p.legend(loc='best')
#p.xscale('log')
p.show()
sys.exit()
def test(out):
sel, raw, norm = out
assert raw.shape == norm.shape
for k,v in sel.iteritems():
if type(v) == np.ndarray:
assert v.sum() > 0, str(k) + str(v.shape)
assert v.sum() < max(raw.shape)
#assert v.shape[0] == raw.shape[0] or v.shape[0] == raw.shape[1]
test(get_data(data_yj, params))
test(get_data(data_jk, params))
test(get_data(data_tcga, params))
test(get_data(data_karen, params))
| mit |
HengfengLi/algorithms-impl | 01.graham_scan/graham_scan.py | 1 | 5192 | # Re-write the C code from Computational Geometry in C - Section 3.5.
# max # of points
PMAX = 1000
P_origin = None
class Coord:
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "x:%.2f,y:%.2f" % (self.x, self.y)
def __repr__(self):
return self.__str__()
class tsPoint:
def __init__(self, id, coord):
self.id = id
self.coord = coord
self.flag_deleted = False
def __str__(self):
return "vnum:%d,coord:%s,deleted:%s" \
% (self.id, self.coord, self.flag_deleted)
def __repr__(self):
return self.__str__()
class tStackCell:
def __init__(self, p):
self.p = p
self.next = None
class tStack:
def __init__(self):
self.top = None
self.num = 0
def pop(self):
if self.num == 0: return None
temp = self.top
self.top = self.top.next
self.num -= 1
return temp.p
def push(self, p):
c = tStackCell(p)
c.next = self.top
self.top = c
self.num += 1
def values(self):
t = self.top
while t:
yield t.p
t = t.next
def __str__(self):
t = self.top
output = ""
while t:
output += "vnum=%d\tx=%.2f\ty=%.2f\n" \
% (t.p.id, t.p.coord.x, t.p.coord.y)
t = t.next
return output
def __repr__(self):
return self.__str__()
def area2(a, b, c):
return (b.x-a.x) * (c.y-a.y) - (c.x-a.x)*(b.y-a.y)
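# area2 returns twice the signed area of triangle (a, b, c): positive for a
# left (counter-clockwise) turn, negative for a right turn, zero when the
# points are collinear. Quick check (illustrative, not in the original file):
#   area2(Coord(0, 0), Coord(1, 0), Coord(0, 1))  # (1)*(1) - (0)*(0) = 1 > 0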
def compare(pi, pj):
a = area2(P_origin.coord, pi.coord, pj.coord)
if a > 0: return -1
if a < 0: return 1
# collinear with P[0]
x = abs(pi.coord.x - P_origin.coord.x) - abs(pj.coord.x - P_origin.coord.x)
y = abs(pi.coord.y - P_origin.coord.y) - abs(pj.coord.y - P_origin.coord.y)
if x < 0 or y < 0:
pi.flag_deleted = True
return -1
elif x > 0 or y > 0:
pj.flag_deleted = True
return 1
else:
# points are coincident
if pi.id > pj.id:
pj.flag_deleted = True
else:
pi.flag_deleted = True
return 0
def swap(P, i, j):
temp = P[i]
P[i] = P[j]
P[j] = temp
def find_lowest(P):
# get a copy
P = P[:]
# index of lowest
m = 0
for i in range(1,len(P)):
        if P[i].coord.y < P[m].coord.y or \
                (P[i].coord.y == P[m].coord.y and P[i].coord.x > P[m].coord.x):
m = i
swap(P, 0, m)
return P
def graham(P):
# initalize stack
stack = tStack()
stack.push(P[0])
stack.push(P[1])
assert(stack.num == 2)
# bottom two elements will never be removed
i = 2
while i < len(P):
# print "i", i
p1 = stack.top.next.p
p2 = stack.top.p
# print area2(p1.coord, p2.coord, P[i].coord)
if area2(p1.coord, p2.coord, P[i].coord) > 0:
stack.push(P[i])
i += 1
else:
# print "stack.pop"
stack.pop()
return stack
def graham_scan(coords):
global P_origin
# construct the list of points
P = []
for i in range(len(coords)):
x, y = coords[i]
P.append(tsPoint(i, Coord(x, y)))
# actual # of points
n = len(P)
# find the right-most of lowest point
P = find_lowest(P)
print "lowest:", P[0]
P_origin = P[0]
# sort other points angularly
new_P = [P[0]] + sorted(P[1:], cmp=compare)
# squash
new_P = filter(lambda p: p.flag_deleted != True ,new_P)
# print new_P
stack = graham(new_P)
return [(p.coord.x, p.coord.y) for p in stack.values()]
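# Minimal usage sketch (hypothetical input; Python 2, like the rest of this
# script). Interior points are discarded and only the hull vertices are
# returned, read off the stack from its top:
#   hull = graham_scan([(0, 0), (3, 0), (3, 3), (0, 3), (1, 1)])
#   # expected: the four corners of the square, e.g.
#   # [(0, 0), (0, 3), (3, 3), (3, 0)]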
if __name__ == '__main__':
import matplotlib.pyplot as plt
coords = [(3,-2), (5,1), (7,4), (6,5), (4,2), (3,3), (3,5), (2,5), (0,5), \
(0,1), (-3,4), (-2,2), (0,0), (-3,2), (-5,2), (-5,1), (-5,-1), \
(1,-2), (-3,-2)]
assert(len(coords) == 19)
convex_hull_points = graham_scan(coords)
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.grid(True, which='both')
# set the x-spine (see below for more info on `set_position`)
ax.spines['left'].set_position('zero')
# turn off the right spine/ticks
ax.spines['right'].set_color('none')
# set the y-spine
ax.spines['bottom'].set_position('zero')
# turn off the top spine/ticks
ax.spines['top'].set_color('none')
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.set_xlim([-6,8])
ax.set_ylim([-3,6])
x1 = [p[0] for p in coords]
y1 = [p[1] for p in coords]
# "clip_on=False, zorder=100" makes the points are above axes and girds
ax.scatter(x1, y1, c='red', clip_on=False, zorder=100)
x2 = [p[0] for p in convex_hull_points + [convex_hull_points[0]]]
y2 = [p[1] for p in convex_hull_points + [convex_hull_points[0]]]
ax.plot(x2, y2, marker='o', clip_on=False, zorder=100)
plt.show() | mit |
micahhausler/pandashells | pandashells/test/p_regplot_test.py | 10 | 1072 | #! /usr/bin/env python
from mock import patch
from unittest import TestCase
import numpy as np
import pandas as pd
from pandashells.bin.p_regplot import main, make_label
class MakeLabelTests(TestCase):
def test_make_label_html(self):
label = make_label(coeffs=[1, 2, 3], savefig=['test.html'])
self.assertEqual(label, 'y = (3) + (2) x + (1) x ^ 2')
def test_make_label_tex(self):
label = make_label(coeffs=[1, 2], savefig=['test.png'])
self.assertEqual(label, '$y = (2) + (1) x$')
class MainTests(TestCase):
@patch(
'pandashells.bin.p_regplot.sys.argv',
'p.regplot -x x -y y'.split())
@patch('pandashells.bin.p_regplot.io_lib.df_from_input')
@patch('pandashells.bin.p_regplot.plot_lib.show')
def test_cli_non_plain(self, show_mock, df_from_input_mock):
df_in = pd.DataFrame({
'x': np.arange(1, 101),
'y': np.arange(1, 101) + np.random.randn(100)
})
df_from_input_mock.return_value = df_in
main()
self.assertTrue(show_mock.called)
| bsd-2-clause |
tylerc-atx/script_library | kfolds_score_any_model.py | 1 | 1105 | '''
Scores any model given a model class and data
Originally written by Shawn Terryah
'''
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, average_precision_score, recall_score
def KFolds_any_model(X_train, y_train, k, model_to_fit):
    ''' k = number of folds. Returns the mean accuracy, precision,
    and recall of any model (kNN(), LogisticRegression(),
    LinearRegression(), etc.)'''
index = 0
kf = KFold(n_splits = k)
accuracy = np.empty(k)
precision = np.empty(k)
recall = np.empty(k)
model = model_to_fit
for train, test in kf.split(X_train, y_train):
model.fit(X_train[train], y_train[train])
pred = model.predict(X_train[test])
accuracy[index] = accuracy_score(y_train[test], pred)
precision[index] = average_precision_score(y_train[test], pred)
recall[index] = recall_score(y_train[test], pred)
index += 1
return 'accuracy: {}, precision: {}, recall: {}'.format(round(accuracy.mean(), 3),
round(precision.mean(), 3), round(recall.mean(), 3))
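# Illustrative usage (hypothetical; the dataset import is an assumption, not
# part of this script). X and y should be numpy arrays so the integer index
# arrays yielded by KFold.split can be used directly:
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.datasets import load_breast_cancer
#   X, y = load_breast_cancer(return_X_y=True)
#   print(KFolds_any_model(X, y, k=5, model_to_fit=LogisticRegression()))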
| mit |
treycausey/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
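    # The surface is evaluated on a 101 x 101 grid (delta = 1 over [-50, 50]):
    # decision_function gives one value per grid point, which is reshaped back
    # to the mesh shape for contouring. Standalone sketch of the same idea
    # (illustrative; 'clf' stands for any fitted classifier):
    #   xx, yy = np.meshgrid(np.arange(-50, 51), np.arange(-50, 51))
    #   zz = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)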
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
scholer/py2cytoscape | tests/test_network.py | 1 | 7157 | # -*- coding: utf-8 -*-
import unittest
import networkx as nx
from py2cytoscape.data.cyrest_client import CyRestClient
import py2cytoscape.util.dataframe as df_util
import json
def pp(dict_data):
print(json.dumps(dict_data, indent=4))
class CyRestClientTests(unittest.TestCase):
def setUp(self):
self.client = CyRestClient()
# cleanup
self.client.network.delete_all()
def test_cyrest_client(self):
print('\n---------- Client status tests start -----------\n')
# Make sure CyREST server is running
status = self.client.status()
self.assertIsNotNone(status)
pp(status)
self.assertEqual('v1', status['apiVersion'])
print('\n---------- Client status tests finished! -----------\n')
def test_create_network(self):
print('\n---------- Create network Tests Start -----------\n')
# Create empty network
num_networks = 5
for i in range(num_networks):
self.client.network.create()
networks = self.client.network.get_all()
self.assertIsNotNone(networks)
self.assertEqual(num_networks, len(networks))
def test_create_from(self):
print('\n---------- Loading network tests start -----------\n')
# Create from URL
locations = [
'http://chianti.ucsd.edu/cytoscape-data/galFiltered.sif',
'http://www.ebi.ac.uk/Tools/webservices/psicquic/intact/'
+ 'webservices/current/search/interactor/brca2_human?format=xml25'
]
networks = self.client.network.create_from(locations)
print(networks)
self.assertIsNotNone(networks)
print('---------- Loading networks done! -----------\n')
def test_network_api(self):
print('\n---------- Network API Tests Start -----------\n')
# Create empty network
for i in range(5):
self.client.network.create()
networks = self.client.network.get_all(format='json')
self.assertIsNotNone(networks)
pp(networks)
def test_cynetwork(self):
print('\n---------- CyNetwork Tests Start -----------\n')
network = self.client.network.create()
self.assertIsNotNone(network)
nodes = network.get_nodes()
pp(nodes)
new_nodes = ['a', 'b', 'c']
nd = network.add_nodes(new_nodes)
self.assertIsNotNone(nd)
pp(nd)
edge = network.add_edge(nd['a'], nd['b'])
self.assertIsNotNone(edge)
pp(edge)
new_edges = ((nd['a'], nd['b'], 'i1'), (nd['a'], nd['c'], 'i1'))
new_edges_result = network.add_edges(new_edges)
print(new_edges_result)
self.assertEqual(2, len(new_edges_result))
node_table = network.get_node_table()
print(node_table)
node_table = network.get_node_table(format='tsv')
print(node_table)
node_table = network.get_node_table(format='csv')
print(node_table)
edge_table = network.get_edge_table()
print(edge_table)
print('\n---------- CyNetwork Tests Finished! -----------\n')
def test_create_view(self):
print('\n---------- CyNetworkView Tests Start -----------\n')
network = self.client.network.create()
self.assertIsNotNone(network)
nodes = network.get_nodes()
pp(nodes)
new_nodes = ['a', 'b', 'c']
nd = network.add_nodes(new_nodes)
views = network.get_views()
print('Views: ')
print(views)
view_id = views[0]
view = network.get_view(view_id, format='view')
self.assertIsNotNone(view)
self.assertEqual(view_id, view.get_id())
node_views = view.get_node_views()
self.assertIsNotNone(node_views)
self.assertEqual(3, len(node_views))
view1 = node_views[0]
self.assertIsNotNone(view1)
all_values = view1.get_values()
print(all_values)
def test_view_api(self):
print('\n---------- View API test start -----------\n')
network = self.client.network.create()
self.assertIsNotNone(network)
nodes = network.get_nodes()
pp(nodes)
new_nodes = ['a', 'b', 'c']
nd = network.add_nodes(new_nodes)
print(nd)
views = network.get_views()
print('Views: ')
print(views)
view_id = views[0]
view = network.get_view(view_id, format='view')
self.assertIsNotNone(view)
self.assertEqual(view_id, view.get_id())
node_views = view.get_node_views()
self.assertIsNotNone(node_views)
self.assertEqual(3, len(node_views))
view1 = node_views[0]
self.assertIsNotNone(view1)
self.assertEqual(0, view1.get_x())
view1.set_x(100)
self.assertEqual(100, view1.get_value('NODE_X_LOCATION'))
new_values = {}
for key in nd.keys():
suid = nd[key]
new_values[suid] = 'red'
view.update_node_views('NODE_FILL_COLOR', new_values)
new_values_name = {}
for node_name in new_nodes:
new_values_name[node_name] = 'pink'
view.update_node_views('NODE_FILL_COLOR', new_values_name, key_type='name')
view.update_network_view('NETWORK_BACKGROUND_PAINT', 'red')
net_view = view.get_network_view_as_dict()
bg_paint = net_view['NETWORK_BACKGROUND_PAINT']
self.assertEqual('#FF0000', bg_paint)
def test_convert(self):
print('\n---------- DataFrame Conversion Tests Start -----------\n')
import os
import pandas as pd
# Clean up Cytoscape session
self.client.session.delete()
dir_name = os.path.dirname(os.path.realpath(__file__))
df = pd.read_csv(
dir_name + '/data/galFiltered.sif',
names=['source', 'interaction', 'target'], sep=' ')
print(df.head(3))
net = df_util.from_dataframe(df)
network = self.client.network.create(data=net, name='Created from DataFrame')
original_column_count = len(network.get_node_columns())
dir_name = os.path.dirname(os.path.realpath(__file__))
file_name = dir_name + '/data/galFiltered.nodeAttrTable.txt'
data_table = pd.read_csv(file_name, sep='\t')
network.update_node_table(df=data_table, data_key_col='ID')
table_column_count = len(data_table.columns)
total_column_count = len(network.get_node_columns())
self.assertEqual(total_column_count, (original_column_count+table_column_count-1))
print('\n---------- DataFrame Conversion Tests Finished! -----------\n')
def test_delete_network(self):
network = self.client.network.create()
suids = self.client.network.get_all()
self.assertEqual(1, len(suids))
self.client.network.delete(network)
suids = self.client.network.get_all()
self.assertEqual(0, len(suids))
def test_create_from_networkx(self):
networkx1 = nx.scale_free_graph(100)
network = self.client.network.create_from_networkx(networkx1)
self.assertIsNotNone(network)
| mit |
RayMick/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
ocefpaf/python-ctd | ctd/extras.py | 2 | 8451 | """
Extra functionality for plotting and post-processing.
"""
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
from pandas import Series
def _extrap1d(interpolator):
"""
How to make scipy.interpolate return an extrapolated result beyond the
input range.
This is usually bad interpolation! But sometimes useful for pretty pictures,
use it with caution!
http://stackoverflow.com/questions/2745329/
"""
xs, ys = interpolator.x, interpolator.y
def pointwise(x):
"""Pointwise interpolation."""
if x < xs[0]:
return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
elif x > xs[-1]:
return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (xs[-1] - xs[-2])
else:
return interpolator(x)
def ufunclike(xs):
"""Return an interpolation ufunc."""
return np.array(list(map(pointwise, np.array(xs))))
return ufunclike
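# Illustrative use (hypothetical values, not from this module): extend a linear
# interpolator past its end points with the end-interval slopes.
#   from scipy.interpolate import interp1d
#   f = _extrap1d(interp1d([0.0, 1.0, 2.0], [0.0, 2.0, 4.0]))
#   f([-1.0, 3.0])  # -> array([-2., 6.])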
def get_maxdepth(self):
"""Return the maximum depth/pressure of a cast."""
valid_last_depth = self.apply(Series.notnull).values.T
return np.float_(self.index.values * valid_last_depth).max(axis=1)
def extrap_sec(data, dist, depth, w1=1.0, w2=0):
"""
Extrapolates `data` to zones where the shallow stations are shadowed by
the deep stations. The shadow region usually cannot be extrapolates via
linear interpolation.
The extrapolation is applied using the gradients of the `data` at a certain
level.
Parameters
----------
data : array_like
Data to be extrapolated
dist : array_like
Stations distance
    depth : array_like
        Depth of each scan
    w1, w2 : float
        Relative weights of the along-distance and along-depth
        extrapolations [0-1]
Returns
-------
Sec_extrap : array_like
Extrapolated variable
"""
from scipy.interpolate import interp1d
new_data1 = []
for row in data:
mask = ~np.isnan(row)
if mask.any():
y = row[mask]
if y.size == 1:
row = np.repeat(y, len(mask))
else:
x = dist[mask]
f_i = interp1d(x, y)
f_x = _extrap1d(f_i)
row = f_x(dist)
new_data1.append(row)
new_data2 = []
for col in data.T:
mask = ~np.isnan(col)
if mask.any():
y = col[mask]
if y.size == 1:
col = np.repeat(y, len(mask))
else:
z = depth[mask]
f_i = interp1d(z, y)
f_z = _extrap1d(f_i)
col = f_z(depth)
new_data2.append(col)
new_data = np.array(new_data1) * w1 + np.array(new_data2).T * w2
return new_data
def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False):
"""
Generates a topography mask from an oceanographic transect taking the
deepest CTD scan as the depth of each station.
Inputs
------
h : array
Pressure of the deepest CTD scan for each station [dbar].
    lon : array
        Longitude of each station [decimal degrees east].
    lat : array
        Latitude of each station [decimal degrees north].
dx : float
Horizontal resolution of the output arrays [km].
kind : string, optional
Type of the interpolation to be performed.
See scipy.interpolate.interp1d documentation for details.
plot : bool
Whether to plot mask for visualization.
Outputs
-------
xm : array
Horizontal distances [km].
hm : array
Local depth [m].
Author
------
André Palóczy Filho ([email protected]) -- October/2012
"""
import gsw
from scipy.interpolate import interp1d
h, lon, lat = list(map(np.asanyarray, (h, lon, lat)))
# Distance in km.
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
h = -gsw.z_from_p(h, lat.mean())
Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
xm = np.arange(0, x.max() + dx, dx)
hm = Ih(xm)
return xm, hm
def plot_section(self, reverse=False, filled=False, **kw):
"""Plot a sequence of CTD casts as a section."""
import gsw
lon, lat, data = list(map(np.asanyarray, (self.lon, self.lat, self.values)))
data = ma.masked_invalid(data)
h = self.get_maxdepth()
if reverse:
lon = lon[::-1]
lat = lat[::-1]
data = data.T[::-1].T
h = h[::-1]
lon, lat = map(np.atleast_2d, (lon, lat))
x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
z = self.index.values.astype(float)
if filled: # CAVEAT: this method cause discontinuities.
data = data.filled(fill_value=np.nan)
data = extrap_sec(data, x, z, w1=0.97, w2=0.03)
# Contour key words.
extend = kw.pop("extend", "both")
fontsize = kw.pop("fontsize", 12)
labelsize = kw.pop("labelsize", 11)
cmap = kw.pop("cmap", plt.cm.rainbow)
levels = kw.pop(
"levels",
np.arange(np.floor(data.min()), np.ceil(data.max()) + 0.5, 0.5),
)
# Colorbar key words.
pad = kw.pop("pad", 0.04)
aspect = kw.pop("aspect", 40)
shrink = kw.pop("shrink", 0.9)
fraction = kw.pop("fraction", 0.05)
# Topography mask key words.
dx = kw.pop("dx", 1.0)
kind = kw.pop("kind", "linear")
linewidth = kw.pop("linewidth", 1.5)
# Station symbols key words.
station_marker = kw.pop("station_marker", None)
color = kw.pop("color", "k")
offset = kw.pop("offset", -5)
alpha = kw.pop("alpha", 0.5)
# Figure.
figsize = kw.pop("figsize", (12, 6))
fig, ax = plt.subplots(figsize=figsize)
xm, hm = gen_topomask(h, lon, lat, dx=dx, kind=kind)
ax.plot(xm, hm, color="black", linewidth=linewidth, zorder=3)
ax.fill_between(xm, hm, y2=hm.max(), color="0.9", zorder=3)
if station_marker:
ax.plot(
x,
[offset] * len(h),
color=color,
marker=station_marker,
alpha=alpha,
zorder=5,
)
ax.set_xlabel("Cross-shore distance [km]", fontsize=fontsize)
ax.set_ylabel("Depth [m]", fontsize=fontsize)
ax.set_ylim(offset, hm.max())
ax.invert_yaxis()
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_label_position("top")
ax.yaxis.set_ticks_position("left")
ax.yaxis.set_label_position("left")
ax.xaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
ax.yaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
# Color version.
cs = ax.contourf(
x,
z,
data,
cmap=cmap,
levels=levels,
alpha=1.0,
extend=extend,
zorder=2,
) # manual=True
# Colorbar.
cb = fig.colorbar(
mappable=cs,
ax=ax,
orientation="vertical",
aspect=aspect,
shrink=shrink,
fraction=fraction,
pad=pad,
)
return fig, ax, cb
def cell_thermal_mass(temperature, conductivity):
"""
Sample interval is measured in seconds.
Temperature in degrees.
CTM is calculated in S/m.
"""
alpha = 0.03 # Thermal anomaly amplitude.
beta = 1.0 / 7 # Thermal anomaly time constant (1/beta).
sample_interval = 1 / 15.0
a = 2 * alpha / (sample_interval * beta + 2)
b = 1 - (2 * a / alpha)
    dCodT = 0.1 * (1 + 0.006 * (temperature - 20))
dT = np.diff(temperature)
ctm = -1.0 * b * conductivity + a * (dCodT) * dT # [S/m]
return ctm
def mixed_layer_depth(CT, method="half degree"):
"""Return the mixed layer depth based on the "half degree" criteria."""
if method == "half degree":
mask = CT[0] - CT < 0.5
else:
mask = np.zeros_like(CT)
return Series(mask, index=CT.index, name="MLD")
def barrier_layer_thickness(SA, CT):
"""
Compute the thickness of water separating the mixed surface layer from the
thermocline. A more precise definition would be the difference between
mixed layer depth (MLD) calculated from temperature minus the mixed layer
depth calculated using density.
"""
import gsw
sigma_theta = gsw.sigma0(SA, CT)
mask = mixed_layer_depth(CT)
mld = np.where(mask)[0][-1]
sig_surface = sigma_theta[0]
sig_bottom_mld = gsw.sigma0(SA[0], CT[mld])
d_sig_t = sig_surface - sig_bottom_mld
d_sig = sigma_theta - sig_bottom_mld
mask = d_sig < d_sig_t # Barrier layer.
return Series(mask, index=SA.index, name="BLT")
| bsd-3-clause |
simpeg/processing | processing/DCIPtools/plottingTest.py | 1 | 3721 |
import DCIPtools as DCIP
import matplotlib.pyplot as plt
import numpy as np
################################################################
# Read binary time-series ======================================
def getData():
xyz = open("/Users/juan/Documents/testData/L100_R42_SR_PG.xyz")
t = []
xt = []
samples_half_t = 600.0
for line in xyz:
x, y = line.split()
t.append(float(x))
xt.append(float(y))
xyz.close()
xt = np.asarray(xt)
num_half_T = np.round(xt.size / samples_half_t) - 2
trim_length = num_half_T * samples_half_t
xt = xt[0:int(trim_length)]
xt = np.asarray(xt)
return xt, num_half_T
def getTime():
timeFrom = [2040., 2060., 2080., 2120., 2160., 2200.,
2240., 2320., 2400.,
2480., 2560., 2640.,
2720., 2800., 2960.,
3120., 3280., 3440.,
3600., 3760.]
timeTo = [2060., 2080., 2120., 2160., 2200., 2240.,
2320., 2400., 2480., 2560., 2640., 2720.,
2800., 2960., 3120., 3280., 3440.,
3600., 3760., 3920.]
return timeFrom, timeTo
# end read =====================================================
# stack the data ===================================================
xt, num_half_T = getData()
timeFrom, timeTo = getTime()
start_vp = 50 # start of Vp calculation (%)
end_vp = 90 # end of Vp calculation (%)
window = DCIP.createHanningWindow(num_half_T) # creates filter window
window3 = DCIP.createHanningWindow(7) # creates filter window
window2 = DCIP.createBruteStackWindow(int(num_half_T))
# print(window2.size, window.size)
tHK = DCIP.filterKernal(filtershape=window) # creates filter kernal
# eHK = DCIP.ensembleKernal(filtershape=window3,
# number_half_periods=num_half_T)
dkernal = DCIP.decayKernal(num_windows=np.asarray(timeTo).size,
window_starts=np.asarray(timeFrom),
window_ends=np.asarray(timeTo),
window_weight=301,
window_overlap=0.95,
output_type="Vs") # creates decay kernal
stack = tHK * xt # stack data
# ens = eHK * xt
# plt.plot(stack)
# plt.show()
decay = dkernal * (tHK * xt) # calculates the decay
# end ======================================================
# cole-cole fitting =========================================
Vp = DCIP.getPrimaryVoltage(start_vp,
end_vp,
stack) # calculate the Vp
# # end =======================================================
# plt.plot(dkernal.getWindowCenters(), win_std)
# plt.show()
# plot results
fig = plt.figure(figsize=(10, 8))
ax1 = plt.subplot(311)
ax1.plot(xt, 'r')
ax1.set_xlabel("num samples")
ax1.set_ylabel("voltage (mV)")
ax1.set_title("Raw time-series")
ax4 = plt.subplot(323)
ax4.plot(tHK.getFilterKernal, 'og')
ax4.set_xlabel("num taps")
ax4.set_ylabel("amplitude")
amp = DCIP.getFrequnceyResponse(window)
freqs = np.arange(0, amp.size) * (150. / window.size)
ax5 = plt.subplot(324)
ax5.plot(freqs, amp, 'm')
ax5.set_xlabel("frequency (Hz)")
ax5.set_ylabel("amplitude")
ax2 = plt.subplot(326)
ax2.plot(dkernal.getWindowCenters(), decay, '-ko')
# ax2.plot(dkernal.getWindowCenters(), decay, '.')
# ax2.plot(dkernal.getWindowCenters(), vs, '-o')
ax2.set_xlabel("time (ms)")
ax2.set_ylabel("Voltage (mV)")
ax2.set_title("Secondary Voltage (decay)")
ax3 = plt.subplot(325)
ax3.plot(tHK * -xt)
ax3.set_xlabel("num samples")
ax3.set_ylabel("Voltage (mV)")
ax3.set_title("Stack (decay)")
plt.show()
| mit |
LouisPlisso/analysis_tools | cdfplot_1.2.py | 1 | 14651 | #!/usr/bin/env python
"Module to plot cdf from data or file. Can be called directly."
from __future__ import division, print_function
from optparse import OptionParser
import sys
# in case of non-interactive usage
#import matplotlib
#matplotlib.use('Agg')
import pylab
from matplotlib.font_manager import FontProperties
import numpy as np
#import matplotlib.pyplot as plt
_VERSION = '1.2'
#TODO: possibility to place legend outside graph:
#pylab.subfigure(111)
#pylab.subplots_adjust(right=0.8) or (top=0.8)
#pylab.legend(loc=(1.1, 0.5)
#CCDF
def ccdfplotdataN(list_data_name, _xlabel='x', _ylabel=r'1 - P(X$\leq$x)',
_title='Empirical Distribution', _fs='x-large',
_fs_legend='medium', _loc = 0, do_color=True, logx=True, logy=False):
"Plot the ccdf of a list of data arrays and names"
#corresponding line width with larger width for '-.' and ':'
if not list_data_name:
print("no data to plot", file=sys.stderr)
return
_ls = ['-', '-.', '-', '--'] * 2 #, ':']
# _lw = [1, 1] + [2, 4, 2, 4, 2, 4]#, 4]
_lw = [2, 4] + [2, 4, 2, 4, 2, 4]#, 4]
assert len(_ls) == len(_lw)
# _colors = ['k', 'k', 'g', 'c', 'm', 'r', 'y', 'pink']
# consequent plots are same color
_colors = ['k', 'k', 'c', 'c', 'm', 'm', 'y', 'y']
_ls_len = len(_ls)
for i in range(len(list_data_name)):# - 1):
name, data = list_data_name[i]
#plot with round robin line style (ls)
#and increasing line width
(div, mod) = divmod(i, _ls_len)
if not do_color:
color = 'k'
# line_width = _lw[mod]+2*div
else:
color = _colors[i % len(_colors)]
# line_width = 2 + div
line_width = _lw[mod]+2*div
ccdfplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
_ylabel=_ylabel, _lw=line_width, _ls=_ls[mod], _fs=_fs,
_color=color)
if logx and logy:
setgraph_loglog(_loc=_loc, _fs_legend=_fs_legend)
elif logy:
setgraph_logy(_loc=_loc, _fs_legend=_fs_legend)
elif logx:
setgraph_logx(_loc=_loc, _fs_legend=_fs_legend)
else:
setgraph_lin(_loc=_loc, _fs_legend=_fs_legend)
def ccdfplotdata(data_in, _color='k', _xlabel='x', _ylabel=r'1 - P(X$\leq$x)',
_title='Empirical Distribution', _name='Data', _lw=2, _fs='x-large',
_fs_legend='medium', _ls = '-', _loc=0):
"Plot the ccdf of a data array"
data = sorted(filter(lambda x: x is not None, data_in))
data_len = len(data)
if data_len == 0:
print("no data to plot", file=sys.stderr)
return
ccdf = 1 - np.arange(data_len + 1) / data_len
data.append(data[-1])
pylab.plot(data, ccdf, _color, lw = _lw, drawstyle = 'steps',
label = _name + ': %d' % data_len, ls = _ls)
pylab.xlabel(_xlabel, size = _fs)
pylab.ylabel(_ylabel, size = _fs)
pylab.title(_title, size = _fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def ccdfplot(_file, col = 0, xlabel = 'X', ylabel = r'1 - P(X$\leq$x)',
title = 'Empirical Distribution', name = 'Data',
_lw = 2, _fs = 'x-large', _ls = '-', _loc=0):
"Plot the ccdf of a column in file"
data = pylab.loadtxt(_file, usecols = [col])
ccdfplotdata(data, _xlabel = xlabel, _ylabel = ylabel,
_title = title, _name = name,
_lw = _lw, _fs = _fs, _ls = _ls, _loc = _loc)
#CDF
def cdfplotdataN(list_data_name, _xlabel = 'x', _ylabel = r'P(X$\leq$x)',
_title = 'Empirical Distribution', _fs = 'x-large',
_fs_legend='medium', _loc = 0, do_color=True, logx=True, logy=False):
"Plot the cdf of a list of names and data arrays"
#corresponding line width with larger width for '-.' and ':'
if not list_data_name:
print("no data to plot", file=sys.stderr)
return
#_ls = ['-', '-.', '-', '--'] * 2 #, ':']
_ls = ['-', '-.', '-', '--'] * 2 + [':'] * 8
# _lw = [1, 1] + [2, 4, 2, 4, 2, 4]#, 4]
_lw = ([2, 4] + [2, 4, 2, 4, 2, 4]) + ([4, 6] + [4, 6, 4, 6, 4, 6])
assert len(_ls) == len(_lw)
# _colors = ['k', 'k', 'g', 'c', 'm', 'r', 'y', 'pink']
# consequent plots are same color
_colors = ['k', 'k', 'c', 'c', 'm', 'm', 'y', 'y']
for i in range(len(list_data_name)):# - 1):
name, data = list_data_name[i]
#plot with round robin line style (ls)
#and increasing line width
(div, mod) = divmod(i, len(_ls))
if not do_color:
color = 'k'
# line_width = _lw[mod]+2*div
else:
color = _colors[i % len(_colors)]
# line_width = 2 + div
line_width = _lw[mod]+2*div
cdfplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
_ylabel=_ylabel, _lw=line_width, _ls=_ls[mod], _fs=_fs,
_color=color)
if logx and logy:
setgraph_loglog(_loc=_loc, _fs_legend=_fs_legend)
elif logy:
setgraph_logy(_loc=_loc, _fs_legend=_fs_legend)
elif logx:
setgraph_logx(_loc=_loc, _fs_legend=_fs_legend)
else:
setgraph_lin(_loc=_loc, _fs_legend=_fs_legend)
# cdfplotdata(data, _name=name, _lw=line_width, _ls=_ls[mod],
# _fs=_fs, _color=color)
# for last cdf, we put the legend and names
# (data, name) = list_data_name[-1]
# (div, mod) = divmod(len(list_data_name), len(_ls))
# if not do_color:
# color = 'k'
# line_width = _lw[mod]+2*div
# else:
# color = _colors[i % len(_colors)]
# line_width = 1 + div
# cdfplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
# _ylabel=_ylabel, _lw=line_width, _ls=_ls[mod], _fs=_fs,
# _fs_legend=_fs_legend, _color=color)
def cdfplotdata(data_in, _color='k', _xlabel='x', _ylabel=r'P(X$\leq$x)',
_title='Empirical Distribution', _name='Data', _lw=2, _fs='x-large',
_fs_legend='medium', _ls = '-', _loc=0):
"Plot the cdf of a data array"
# data = pylab.array(data_in, copy=True)
# to remove the None objects
data = sorted(filter(lambda x: x is not None, data_in))
data_len = len(data)
if data_len == 0:
print("no data to plot", file=sys.stderr)
return
cdf = np.arange(data_len + 1) / data_len
data.append(data[-1])
pylab.plot(data, cdf, _color, lw = _lw, drawstyle = 'steps',
label = _name + ': %d' % data_len, ls = _ls)
pylab.xlabel(_xlabel, size = _fs)
pylab.ylabel(_ylabel, size = _fs)
pylab.title(_title, size = _fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
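# Worked sketch of the step construction above (illustrative): for
# data_in = [3, 1, 2] the sorted data become [1, 2, 3, 3] once the last value
# is repeated, and cdf = [0, 1/3, 2/3, 1], so plot(..., drawstyle='steps')
# draws the empirical distribution function as a staircase.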
def cdfplot(_file, col = 0, xlabel = 'X',
ylabel = r'P(X$\leq$x)',
title = 'Empirical Distribution', name = 'Data',
_lw = 2, _fs = 'x-large', _ls = '-', _loc=0):
"Plot the cdf of a column in file"
data = pylab.loadtxt(_file, usecols = [col])
cdfplotdata(data, _xlabel = xlabel, _ylabel = ylabel,
_title = title, _name = name,
_lw = _lw, _fs = _fs, _ls = _ls, _loc = _loc)
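# Illustrative usage sketch (hypothetical, not part of the original module).
# cdfplotdata() builds a step CDF by sorting the sample and pairing it with
# np.arange(n + 1) / n, and cdfplotdataN() overlays several named samples;
# the synthetic arrays and output file name below are invented.
def _example_cdf_usage():
    "Minimal sketch: overlay the empirical CDFs of two synthetic samples."
    import numpy as np  # the module also imports numpy/pylab at the top
    rng = np.random.RandomState(0)
    sample_a = rng.exponential(scale=1.0, size=500)
    sample_b = rng.exponential(scale=2.0, size=500)
    # cdfplotdataN expects a list of (name, data) pairs
    cdfplotdataN([('scale 1', sample_a), ('scale 2', sample_b)],
                 _xlabel='x', logx=False)
    pylab.savefig('example_cdfs.png')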
def setgraph_lin(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
"Set graph in xlogscale and adjusts x&y markers"
pylab.grid(True)
_ax = pylab.gca()
for tick in _ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
for tick in _ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def setgraph_logx(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
"Set graph in xlogscale and adjusts x&y markers"
pylab.grid(True)
pylab.semilogx(nonposy='clip', nonposx='clip')
_ax = pylab.gca()
for tick in _ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
for tick in _ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def setgraph_loglog(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
"Set graph in xlogscale and adjusts x&y markers"
pylab.grid(True)
pylab.loglog(nonposy='clip', nonposx='clip')
_ax = pylab.gca()
for tick in _ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
for tick in _ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def setgraph_logy(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
"Set graph in xlogscale and adjusts x&y markers"
pylab.grid(True)
pylab.semilogy(nonposy='clip', nonposx='clip')
_ax = pylab.gca()
for tick in _ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
for tick in _ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
#repartition plots
def repartplotdataN(list_data_name, _xlabel = 'Rank',
_ylabel = 'Cumulative Percentage of Data',
_title = 'Repartition of values',
_fs = 'x-large', do_color=True, _loc=0, loglog=True, savefile=None):
"Plot the repartition of a list of data arrays and names"
#corresponding line width with larger width for '-.' and ':'
if not list_data_name:
print("no data to plot", file=sys.stderr)
return
# while adapting cdfplot v2
pylab.clf()
_ls = ['-', '-.', '-', '--'] * 2 #, ':']
# _ls = ['-', '-.', '--', ':']
_lw = [2, 4] + [2, 4, 2, 4, 2, 4]#, 4]
# _lw = [1, 2, 3, 4]
assert len(_ls) == len(_lw)
_len_ls = len(_ls)
    # consecutive plots share the same color
_colors = ['k', 'k', 'c', 'c', 'm', 'm', 'y', 'y']
for i, (name, data) in enumerate(list_data_name):
#range(len(list_data_name)):# - 1):
#name, data = list_data_name[i]
#plot with round robin line style (ls)
#and increasing line width
(div, mod) = divmod(i, _len_ls)
if not do_color:
color = 'k'
# line_width = _lw[mod]+2*div
else:
color = _colors[i % len(_colors)]
# line_width = 2 + div
line_width = _lw[mod]+2*div
repartplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
_ylabel=_ylabel, _lw=line_width, _ls=_ls[mod], _fs=_fs,
_color=color)
if loglog:
setgraph_loglog(_loc=_loc)
else:
setgraph_lin(_loc=_loc)
if savefile:
pylab.savefig(savefile)
# #for last cdf, we put the legend and names
# (name, data) = list_data_name[-1]
# (div, mod) = divmod(len(list_data_name), _len_ls)
# repartplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
# _ylabel=_ylabel, _lw=_lw[mod]+2*div, _ls=_ls[mod], _fs=_fs)
# setgraph_loglog(_loc=_loc)
def repartplotdata(data_in, _color='k', _xlabel = 'Rank',
_ylabel = 'Cumulative Percentage of Data',
_title = 'Repartition of values', _name = 'Data', _lw = 2,
_fs = 'x-large', _fs_legend='medium', _ls = '-', _loc=0):
"Plot the repartition of a data array"
data = pylab.array(data_in, copy=True)
if len(data) == 0:
print("no data to plot", file=sys.stderr)
return
data.sort()
rank = np.arange(1, len(data) + 1)
values = np.cumsum(data[::-1])
pylab.plot(rank, 100 * values / values[-1], _color, lw = _lw,
drawstyle = 'steps', label = _name + ': %d' % len(data),
ls = _ls)
pylab.xlabel(_xlabel, size = _fs)
pylab.ylabel(_ylabel, size = _fs)
pylab.title(_title, size = _fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def repartplot(_file, col = 0, xlabel = 'Rank',
ylabel = 'Cumulative Percentage of Data',
title = 'Repartition of values', name = 'Data',
_lw = 2, _fs = 'x-large', _ls = '-', _loc=0):
"Plot the cdf of a column in file"
data = pylab.loadtxt(_file, usecols = [col])
repartplotdata(data, _xlabel = xlabel, _ylabel = ylabel,
_title = title, _name = name,
_lw = _lw, _fs = _fs, _ls = _ls, _loc = _loc)
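# Another hypothetical sketch, not from the original source.  repartplotdata()
# sorts the values, reverses them and plots the cumulative share of the total
# against the rank ("the top k values account for y% of the sum"); the
# heavy-tailed sample below is invented for the example.
def _example_repartition_usage():
    "Minimal sketch: repartition plot of a Pareto-like synthetic sample."
    import numpy as np  # the module also imports numpy/pylab at the top
    rng = np.random.RandomState(0)
    sample = rng.pareto(a=1.5, size=1000)
    repartplotdataN([('pareto a=1.5', sample)], loglog=True,
                    savefile='example_repartition.png')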
def main():
"Program wrapper."
usage = "%prog -r data_file [-c col -x x_label -y y_label -t title \
-n data_name -lw line_width -fs fontsize [-g|-p]]"
parser = OptionParser(usage = usage, version="%prog " + _VERSION)
parser.add_option("-r", dest = "file",
help = "input data file or stdin if FILE is -")
parser.add_option("-c", dest = "col", type = "int", default = 0,
help = "column in the file [default value = 0]")
parser.add_option("-x", dest = "xlabel", default = 'X',
help = "x label")
parser.add_option("-y", dest = "ylabel",
default = r'P(X$\leq$x)', help = "y label")
parser.add_option("-t", dest = "title",
default = 'Empirical Distribution',
help = "graph title")
parser.add_option("-n", dest = "name", default = 'Data',
help = "data name")
parser.add_option("-l", "--lw", dest = "lw", type = "int",
default = 2, help = "line width")
parser.add_option("-f", "--fs", dest = "fs", type = "int",
default = 18, help = "font size")
parser.add_option("-g", "--ccdf", dest = "g",
action="store_true", default=False,
help = "plot ccdf instead of cdf")
parser.add_option("-p", "--repartition", dest = "p",
action="store_true", default=False,
help = "plot repartition instead of cdf")
(options, _) = parser.parse_args()
if not options.file:
print("Must provide filename.")
parser.print_help()
exit(1)
if options.file == '-':
out_file = sys.stdin
else:
try:
out_file = open(options.file, 'r')
except IOError:
print("File, %s, does not exist." % options.file)
parser.print_help()
exit(1)
if options.g and options.p:
print("g and p options are exclusive.")
parser.print_help()
exit(1)
pylab.clf()
if options.g:
ccdfplot(out_file, col=options.col, _lw=options.lw, _fs=options.fs)
elif options.p:
repartplot(out_file, col=options.col, _lw=options.lw, _fs=options.fs)
else:
cdfplot(out_file, col=options.col, xlabel=options.xlabel,
ylabel=options.ylabel, title=options.title,
name=options.name, _lw=options.lw, _fs=options.fs)
setgraph_logx(_fs = options.fs)
pylab.show()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
pradyu1993/scikit-learn | examples/linear_model/plot_omp.py | 6 | 1677 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print __doc__
import pylab as pl
import numpy as np
from sklearn.linear_model import orthogonal_mp
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_atoms = 17
# generate the data
###################
# y = Dx
# |x|_0 = n_atoms
y, D, x = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_atoms,
random_state=0)
idx, = x.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
pl.subplot(3, 1, 1)
pl.xlim(0, 512)
pl.title("Sparse signal")
pl.stem(idx, x[idx])
# plot the noise-free reconstruction
####################################
x_r = orthogonal_mp(D, y, n_atoms)
idx_r, = x_r.nonzero()
pl.subplot(3, 1, 2)
pl.xlim(0, 512)
pl.title("Recovered signal from noise-free measurements")
pl.stem(idx_r, x_r[idx_r])
# plot the noisy reconstruction
###############################
x_r = orthogonal_mp(D, y_noisy, n_atoms)
idx_r, = x_r.nonzero()
pl.subplot(3, 1, 3)
pl.xlim(0, 512)
pl.title("Recovered signal from noisy measurements")
pl.stem(idx_r, x_r[idx_r])
pl.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
pl.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
pl.show()
| bsd-3-clause |
kklmn/xrt | examples/withRaycing/01_SynchrotronSources/undulatorTapering.py | 1 | 4236 | # -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev", "Roman Chernikov"
__date__ = "08 Mar 2016"
import pickle
import numpy as np
import matplotlib.pyplot as plt
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import xrt.backends.raycing as raycing
import xrt.backends.raycing.sources as rs
import xrt.backends.raycing.screens as rsc
import xrt.backends.raycing.run as rr
import xrt.backends.raycing.materials as rm
import xrt.plotter as xrtp
import xrt.runner as xrtr
showIn3D = False
prefix = 'taper_'
xlimits = [-0.9, 0.9]
zlimits = [-0.9, 0.9]
eMin, eMax = 10200-800, 10200+800
def build_beamline(nrays=2e5):
beamLine = raycing.BeamLine()
rs.Undulator(
beamLine, 'P06', nrays=nrays, eEspread=0.0011,
eSigmaX=34.64, eSigmaZ=6.285, eEpsilonX=1., eEpsilonZ=0.01,
period=31.4, K=2.1392-0.002, n=63, eE=6.08, eI=0.1, xPrimeMax=1.5e-2,
zPrimeMax=1.5e-2, eMin=eMin, eMax=eMax, distE='BW',
xPrimeMaxAutoReduce=False, zPrimeMaxAutoReduce=False,
# targetOpenCL='CPU',
taper=(1.09, 11.254))
beamLine.fsm1 = rsc.Screen(beamLine, 'FSM1', (0, 90000, 0))
return beamLine
def run_process(beamLine):
beamSource = beamLine.sources[0].shine()
beamFSM1 = beamLine.fsm1.expose(beamSource)
outDict = {'beamSource': beamSource,
'beamFSM1': beamFSM1}
if showIn3D:
beamLine.prepare_flow()
return outDict
rr.run_process = run_process
def define_plots(beamLine):
plots = []
plotsE = []
xaxis = xrtp.XYCAxis(r'$x$', 'mm', limits=xlimits, bins=360, ppb=1)
yaxis = xrtp.XYCAxis(r'$z$', 'mm', limits=zlimits, bins=360, ppb=1)
caxis = xrtp.XYCAxis('energy', 'keV', bins=360, ppb=1)
plot = xrtp.XYCPlot(
'beamFSM1', (1,), xaxis=xaxis, yaxis=yaxis, caxis=caxis,
aspect='auto', title='total flux', ePos=1)
plot.baseName = prefix + '1TotalFlux'
plot.saveName = plot.baseName + '.png'
plots.append(plot)
plotsE.append(plot)
for plot in plotsE:
plot.caxis.limits = eMin*1e-3, eMax*1e-3
for plot in plots:
plot.fluxFormatStr = '%.2p'
return plots, plotsE
def afterScript(plots):
plot = plots[-1]
flux = [plot.intensity, plot.nRaysAll, plot.nRaysAccepted,
plot.nRaysSeeded]
cwd = os.getcwd()
pickleName = os.path.join(cwd, plot.baseName+'.pickle')
with open(pickleName, 'wb') as f:
pickle.dump((flux, plot.caxis.binEdges, plot.caxis.total1D), f,
protocol=2)
plot_compare()
def main():
beamLine = build_beamline()
if showIn3D:
beamLine.glow()
else:
plots, plotsE = define_plots(beamLine)
xrtr.run_ray_tracing(plots, repeats=100, beamLine=beamLine,
afterScript=afterScript, afterScriptArgs=[plots])
def plot_compare():
fig1 = plt.figure(1, figsize=(7, 5))
ax = plt.subplot(111, label='1')
ax.set_xlabel(u'energy (keV)')
ax.set_ylabel(u'flux (a.u.)')
cwd = os.getcwd()
pickleName = os.path.join(cwd, 'taper_1TotalFlux.pickle')
with open(pickleName, 'rb') as f:
_f, binEdges, total1D = pickle.load(f)
dE = binEdges[1] - binEdges[0]
E = binEdges[:-1] + dE/2.
ax.plot(E, total1D/max(total1D), 'r', label='calculated by xrt', lw=2)
try:
e, f = np.loadtxt('fluxUndulator1DtaperP06.dc0', skiprows=10,
usecols=[0, 1], unpack=True)
ax.plot(e*1e-3, f/max(f), 'b', label='calculated by Spectra', lw=2)
except: # analysis:ignore
pass
# e, f = np.loadtxt('yaup-0.out', skiprows=32, usecols=[0, 1], unpack=True)
# ax.plot(e*1e-3, f/max(f), 'g', label='calculated by YAUP/XOP', lw=2)
theta, fl = np.loadtxt("thetaexafssc1an_zn_hgap_00002r2.fio.gz",
skiprows=113, usecols=(0, 5), unpack=True)
si_1 = rm.CrystalSi(hkl=(1, 1, 1), tK=77)
E = rm.ch / (2 * si_1.d * np.sin(np.radians(theta)))
ax.plot(E*1e-3, fl/max(fl), 'k', lw=2, label='measured @ Petra3')
# ax2.set_xlim(0, None)
# ax2.set_ylim(1.400, 1.600)
ax.legend(loc='lower center')
fig1.savefig('compareTaper.png')
plt.show()
if __name__ == '__main__':
main()
| mit |
SANDAG/spandex | setup.py | 1 | 1323 | import os.path
# Install setuptools if not installed.
try:
import setuptools
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# read README as the long description
readme = 'README' if os.path.exists('README') else 'README.md'
with open(readme, 'r') as f:
long_description = f.read()
setup(
name='spandex',
version='0.1dev',
description='Spatial Analysis and Data Extraction',
long_description=long_description,
author='Autodesk',
author_email='[email protected]',
url='https://github.com/udst/spandex',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.7',
],
packages=find_packages(exclude=['*.tests']),
install_requires=[
'GeoAlchemy2>=0.2.1', # Bug fix for schemas other than public.
'pandas>=0.15.0', # pandas.Index.difference.
'psycopg2>=2.5', # connection and cursor context managers.
'six>=1.4', # Mapping for urllib.
'SQLAlchemy==0.9.9' # GeoAlchemy2 support.
],
extras_require={
'gdal': ['GDAL>=1.7'], # Python 3 support.
'plot': ['pygraphviz'],
'sim': ['urbansim>=1.3'], # TableFrame support and sim.table caching.
}
)
| bsd-3-clause |
BigTone2009/sms-tools | lectures/09-Sound-description/plots-code/loudness.py | 25 | 1337 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
energy = ess.Energy()
rms = ess.RMS()
loudness = ess.Loudness()
x = ess.MonoLoader(filename = '../../../sounds/piano.wav', sampleRate = fs)()
energies = []
rmss = []
loudnesses = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
energy_val = energy(frame)
energies.append(energy_val)
rms_val = rms(frame)
rmss.append(rms_val)
loudness_val = loudness(frame)
loudnesses.append(loudness_val)
energies = np.array(energies)/max(energies)
rmss = np.array(rmss)/max(rmss)
loudnesses = np.array(loudnesses)/max(loudnesses)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (piano.wav)')
plt.subplot(2,1,2)
frmTime = H*np.arange(energies.size)/float(fs)
plt.plot(frmTime, rmss, 'g', lw=1.5, label='normalized RMS')
plt.plot(frmTime, loudnesses, 'c', lw=1.5, label ='normalized loudness')
plt.plot(frmTime, energies, 'r', lw=1.5, label='normalized energy')
plt.axis([0, x.size/float(fs), 0, 1])
plt.ylabel('normalized value')
plt.legend()
plt.tight_layout()
plt.savefig('loudness.png')
plt.show()
| agpl-3.0 |
lucha93/RecFIN-Data-Analysis-Project | predict.py | 1 | 1522 | import sqlite3
import matplotlib.pyplot as plt
from collections import OrderedDict
import numpy as np
# Connect to database
conn = sqlite3.connect('sitedb.sqlite')
cur = conn.cursor()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
avg_list = []
std_list = []
for i in range(1, 13, 1):
stuff = cur.execute('''
SELECT * FROM Data WHERE month = ? ORDER BY date''', (str(i), ))
# catch from each month
month_catch = []
for line in stuff:
month_catch.append(line[7])
high = max(month_catch)
    month_catch.remove(high)  # list.remove() works in place and returns None
avg = np.mean(month_catch)
std_dev = np.std(month_catch)
# list of avg ordered by month
avg_list.append(avg)
# list of std deviations ordered by month
std_list.append(std_dev)
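# Hypothetical helper, never called by this script: an equivalent,
# non-mutating way to drop the single largest catch before computing a
# month's statistics.
def _trimmed_mean_std(catches):
    "Return (mean, std) of the catches with the single largest value removed."
    trimmed = np.sort(catches)[:-1]
    return np.mean(trimmed), np.std(trimmed)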
# Graph of normal distribution of predictions
for i in range(len(avg_list)):
mu = avg_list[i]
sigma = std_list[i]
s = np.random.normal(mu, sigma, 1000)
count, bins, ignored = plt.hist(s, 30, normed=True)
plt.title('Normal Distribution of Predicted Catch in %s' % months[i])
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
linewidth=2, color='r')
axes = plt.gca()
axes.set_xlim(0,)
plt.show()
#plt.figure(1)
#plt.bar(range(len(avg_catch)), avg_catch.values(), align='center')
#plt.xticks(range(len(avg_catch)), avg_catch.keys())
#plt.xlabel('Month')
#plt.ylabel('Average Catch')
#plt.title('Average Catch at Ventura County Shore Sites 2000-2010')
#plt.show()
| mit |
lehnertu/TEUFEL | scripts/plot_ObservationTimeDomain.py | 1 | 2983 | #!/usr/bin/env python
# coding=UTF-8
import sys, sdds, time
import os.path
import argparse
import numpy as np
import matplotlib.pyplot as plt
# magnetic field constant in N/A²
mu0 = 4*np.pi*1e-7
parser = argparse.ArgumentParser()
parser.add_argument('files', default=[], nargs='+', help='file name(s) for the stored population(s)')
# parser.add_argument('file', help='the name of the SDDS file with the observation field data')
parser.add_argument('--list_params', dest='listpar',
action='store_const', const=True, default=False, help='list all parameters available in the file')
parser.add_argument('--list_columns', dest='listcol',
action='store_const', const=True, default=False, help='list all columns available in the file')
args = parser.parse_args()
left, width = 0.15, 0.80
rect1 = [left, 0.55, width, 0.40] #left, bottom, width, height
rect2 = [left, 0.08, width, 0.40]
fig = plt.figure(1,figsize=(12,9))
ax1 = fig.add_axes(rect1)
ax4 = fig.add_axes(rect2, sharex=ax1)
for file in args.files:
fileOK = os.path.isfile(file)
if not fileOK:
print "file not found"
sys.exit()
print "reading ",file
data = sdds.SDDS(0)
data.load(file)
if args.listpar:
data.listParameters()
if args.listcol:
data.listColumns()
if ("t" in data.columnName):
t = np.array(data.getColumnData("t"))*1e9
if ("t0" in data.parameterName and
"dt" in data.parameterName and
"NumberTimeSteps" in data.parameterName):
t0 = data.getParameterValue("t0")
dt = data.getParameterValue("dt")
nots = data.getParameterValue("NumberTimeSteps")
print r'%d steps starting at t0=%12.3g s step dt=%12.3g s' % (nots,t0,dt)
t = (np.linspace(t0, t0+nots*dt, num=nots)+0.5*dt)*1e9
Ex = np.array(data.getColumnData("Ex"))
Ey = np.array(data.getColumnData("Ey"))
Ez = np.array(data.getColumnData("Ez"))
Bx = np.array(data.getColumnData("Bx"))
By = np.array(data.getColumnData("By"))
Bz = np.array(data.getColumnData("Bz"))
EVec = np.array([Ex, Ey, Ez]).transpose()
BVec = np.array([Bx, By, Bz]).transpose()
# Poynting vector in V/m * (N/(A m)) / (N/A²) = W/m²
SVec = np.cross(EVec, BVec) / mu0
if 'dt' in globals():
print 'energy flow density = ', SVec.sum(axis=0)*dt, " Ws/m²"
l1 = ax1.plot(t, Ex, "r-", label=r'$E_x$')
l2 = ax1.plot(t, Ey, "b-", label=r'$E_y$')
l3 = ax1.plot(t, Ez, "g-", label=r'$E_z$')
ax1.set_ylabel(r'$E$ [V/m]')
lines = l1 + l2 + l3
labels = [l.get_label() for l in lines]
ax1.legend(lines,labels,loc='upper right')
for label in ax1.get_xticklabels():
label.set_visible(False)
ax1.grid(True)
l4 = ax4.plot(t, Bx, "r-", label=r'$B_x$')
l5 = ax4.plot(t, By, "b-", label=r'$B_y$')
l6 = ax4.plot(t, Bz, "g-", label=r'$B_z$')
ax4.set_ylabel(r'$B$ [T]')
ax4.set_xlabel(r't [ns]')
lines = l4 + l5 +l6
labels = [l.get_label() for l in lines]
ax4.legend(lines,labels,loc='upper right')
ax4.grid(True)
plt.show()
| gpl-3.0 |
EricCline/CEM_inc | env/lib/python2.7/site-packages/IPython/kernel/inprocess/tests/test_kernel.py | 4 | 3250 | #-------------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Standard library imports
import sys
import unittest
# Local imports
from IPython.kernel.inprocess.blocking import BlockingInProcessKernelClient
from IPython.kernel.inprocess.manager import InProcessKernelManager
from IPython.kernel.inprocess.ipkernel import InProcessKernel
from IPython.testing.decorators import skipif_not_matplotlib
from IPython.utils.io import capture_output
from IPython.utils import py3compat
if py3compat.PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Test case
#-----------------------------------------------------------------------------
class InProcessKernelTestCase(unittest.TestCase):
def setUp(self):
self.km = InProcessKernelManager()
self.km.start_kernel()
self.kc = BlockingInProcessKernelClient(kernel=self.km.kernel)
self.kc.start_channels()
@skipif_not_matplotlib
def test_pylab(self):
""" Does pylab work in the in-process kernel?
"""
kc = self.kc
kc.execute('%pylab')
msg = get_stream_message(kc)
self.assert_('matplotlib' in msg['content']['data'])
def test_raw_input(self):
""" Does the in-process kernel handle raw_input correctly?
"""
io = StringIO('foobar\n')
sys_stdin = sys.stdin
sys.stdin = io
try:
if py3compat.PY3:
self.kc.execute('x = input()')
else:
self.kc.execute('x = raw_input()')
finally:
sys.stdin = sys_stdin
self.assertEqual(self.km.kernel.shell.user_ns.get('x'), 'foobar')
def test_stdout(self):
""" Does the in-process kernel correctly capture IO?
"""
kernel = InProcessKernel()
with capture_output() as io:
kernel.shell.run_cell('print("foo")')
self.assertEqual(io.stdout, 'foo\n')
kc = BlockingInProcessKernelClient(kernel=kernel)
kernel.frontends.append(kc)
kc.shell_channel.execute('print("bar")')
msg = get_stream_message(kc)
self.assertEqual(msg['content']['data'], 'bar\n')
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def get_stream_message(kernel_client, timeout=5):
""" Gets a single stream message synchronously from the sub channel.
"""
while True:
msg = kernel_client.get_iopub_msg(timeout=timeout)
if msg['header']['msg_type'] == 'stream':
return msg
if __name__ == '__main__':
unittest.main()
| mit |
wubr2000/zipline | tests/modelling/test_engine.py | 2 | 15301 | """
Tests for SimpleFFCEngine
"""
from __future__ import division
from unittest import TestCase
from numpy import (
full,
isnan,
nan,
)
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
date_range,
Int64Index,
rolling_mean,
Timestamp,
Series,
)
from pandas.util.testing import assert_frame_equal
from testfixtures import TempDirectory
from zipline.assets import AssetFinder
from zipline.data.equities import USEquityPricing
from zipline.data.ffc.synthetic import (
ConstantLoader,
MultiColumnLoader,
NullAdjustmentReader,
SyntheticDailyBarWriter,
)
from zipline.data.ffc.frame import (
DataFrameFFCLoader,
MULTIPLY,
)
from zipline.data.ffc.loaders.us_equity_pricing import (
BcolzDailyBarReader,
USEquityPricingLoader,
)
from zipline.finance.trading import TradingEnvironment
from zipline.modelling.engine import SimpleFFCEngine
from zipline.modelling.factor import TestingFactor
from zipline.modelling.factor.technical import (
MaxDrawdown,
SimpleMovingAverage,
)
from zipline.utils.lazyval import lazyval
from zipline.utils.test_utils import (
make_rotating_asset_info,
make_simple_asset_info,
product_upper_triangle,
check_arrays,
)
class RollingSumDifference(TestingFactor):
window_length = 3
inputs = [USEquityPricing.open, USEquityPricing.close]
def from_windows(self, open, close):
return (open - close).sum(axis=0)
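# With the ConstantLoader constants used in ConstantInputTestCase below
# (open=2, close=3), every windowed difference is 2 - 3 = -1, so this factor
# evaluates to -window_length for every asset and date, which is exactly what
# the assertions in that test case check.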
class ConstantInputTestCase(TestCase):
def setUp(self):
self.constants = {
# Every day, assume every stock starts at 2, goes down to 1,
# goes up to 4, and finishes at 3.
USEquityPricing.low: 1,
USEquityPricing.open: 2,
USEquityPricing.close: 3,
USEquityPricing.high: 4,
}
self.assets = [1, 2, 3]
self.dates = date_range('2014-01-01', '2014-02-01', freq='D', tz='UTC')
self.loader = ConstantLoader(
constants=self.constants,
dates=self.dates,
assets=self.assets,
)
self.asset_info = make_simple_asset_info(
self.assets,
start_date=self.dates[0],
end_date=self.dates[-1],
)
self.asset_finder = AssetFinder(self.asset_info)
def test_bad_dates(self):
loader = self.loader
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
msg = "start_date must be before end_date .*"
with self.assertRaisesRegexp(ValueError, msg):
engine.factor_matrix({}, self.dates[2], self.dates[1])
with self.assertRaisesRegexp(ValueError, msg):
engine.factor_matrix({}, self.dates[2], self.dates[2])
def test_single_factor(self):
loader = self.loader
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
result_shape = (num_dates, num_assets) = (5, len(self.assets))
dates = self.dates[10:10 + num_dates]
factor = RollingSumDifference()
result = engine.factor_matrix({'f': factor}, dates[0], dates[-1])
self.assertEqual(set(result.columns), {'f'})
assert_array_equal(
result['f'].unstack().values,
full(result_shape, -factor.window_length),
)
def test_multiple_rolling_factors(self):
loader = self.loader
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
shape = num_dates, num_assets = (5, len(self.assets))
dates = self.dates[10:10 + num_dates]
short_factor = RollingSumDifference(window_length=3)
long_factor = RollingSumDifference(window_length=5)
high_factor = RollingSumDifference(
window_length=3,
inputs=[USEquityPricing.open, USEquityPricing.high],
)
results = engine.factor_matrix(
{'short': short_factor, 'long': long_factor, 'high': high_factor},
dates[0],
dates[-1],
)
self.assertEqual(set(results.columns), {'short', 'high', 'long'})
        # row-wise sum over an array whose values are all (2 - 3)
assert_array_equal(
results['short'].unstack().values,
full(shape, -short_factor.window_length),
)
assert_array_equal(
results['long'].unstack().values,
full(shape, -long_factor.window_length),
)
        # row-wise sum over an array whose values are all (2 - 4)
assert_array_equal(
results['high'].unstack().values,
full(shape, -2 * high_factor.window_length),
)
def test_numeric_factor(self):
constants = self.constants
loader = self.loader
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
num_dates = 5
dates = self.dates[10:10 + num_dates]
high, low = USEquityPricing.high, USEquityPricing.low
open, close = USEquityPricing.open, USEquityPricing.close
high_minus_low = RollingSumDifference(inputs=[high, low])
open_minus_close = RollingSumDifference(inputs=[open, close])
avg = (high_minus_low + open_minus_close) / 2
results = engine.factor_matrix(
{
'high_low': high_minus_low,
'open_close': open_minus_close,
'avg': avg,
},
dates[0],
dates[-1],
)
high_low_result = results['high_low'].unstack()
expected_high_low = 3.0 * (constants[high] - constants[low])
assert_frame_equal(
high_low_result,
DataFrame(
expected_high_low,
index=dates,
columns=self.assets,
)
)
open_close_result = results['open_close'].unstack()
expected_open_close = 3.0 * (constants[open] - constants[close])
assert_frame_equal(
open_close_result,
DataFrame(
expected_open_close,
index=dates,
columns=self.assets,
)
)
avg_result = results['avg'].unstack()
expected_avg = (expected_high_low + expected_open_close) / 2.0
assert_frame_equal(
avg_result,
DataFrame(
expected_avg,
index=dates,
columns=self.assets,
)
)
class FrameInputTestCase(TestCase):
def setUp(self):
env = TradingEnvironment.instance()
day = env.trading_day
self.assets = Int64Index([1, 2, 3])
self.dates = date_range(
'2015-01-01',
'2015-01-31',
freq=day,
tz='UTC',
)
asset_info = make_simple_asset_info(
self.assets,
start_date=self.dates[0],
end_date=self.dates[-1],
)
self.asset_finder = AssetFinder(asset_info)
@lazyval
def base_mask(self):
return self.make_frame(True)
def make_frame(self, data):
return DataFrame(data, columns=self.assets, index=self.dates)
def test_compute_with_adjustments(self):
dates, assets = self.dates, self.assets
low, high = USEquityPricing.low, USEquityPricing.high
apply_idxs = [3, 10, 16]
def apply_date(idx, offset=0):
return dates[apply_idxs[idx] + offset]
adjustments = DataFrame.from_records(
[
dict(
kind=MULTIPLY,
sid=assets[1],
value=2.0,
start_date=None,
end_date=apply_date(0, offset=-1),
apply_date=apply_date(0),
),
dict(
kind=MULTIPLY,
sid=assets[1],
value=3.0,
start_date=None,
end_date=apply_date(1, offset=-1),
apply_date=apply_date(1),
),
dict(
kind=MULTIPLY,
sid=assets[1],
value=5.0,
start_date=None,
end_date=apply_date(2, offset=-1),
apply_date=apply_date(2),
),
]
)
low_base = DataFrame(self.make_frame(30.0))
low_loader = DataFrameFFCLoader(low, low_base.copy(), adjustments=None)
# Pre-apply inverse of adjustments to the baseline.
high_base = DataFrame(self.make_frame(30.0))
high_base.iloc[:apply_idxs[0], 1] /= 2.0
high_base.iloc[:apply_idxs[1], 1] /= 3.0
high_base.iloc[:apply_idxs[2], 1] /= 5.0
high_loader = DataFrameFFCLoader(high, high_base, adjustments)
loader = MultiColumnLoader({low: low_loader, high: high_loader})
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
for window_length in range(1, 4):
low_mavg = SimpleMovingAverage(
inputs=[USEquityPricing.low],
window_length=window_length,
)
high_mavg = SimpleMovingAverage(
inputs=[USEquityPricing.high],
window_length=window_length,
)
bounds = product_upper_triangle(range(window_length, len(dates)))
for start, stop in bounds:
results = engine.factor_matrix(
{'low': low_mavg, 'high': high_mavg},
dates[start],
dates[stop],
)
self.assertEqual(set(results.columns), {'low', 'high'})
iloc_bounds = slice(start, stop + 1) # +1 to include end date
low_results = results.unstack()['low']
assert_frame_equal(low_results, low_base.iloc[iloc_bounds])
high_results = results.unstack()['high']
assert_frame_equal(high_results, high_base.iloc[iloc_bounds])
class SyntheticBcolzTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.first_asset_start = Timestamp('2015-04-01', tz='UTC')
cls.env = TradingEnvironment.instance()
cls.trading_day = cls.env.trading_day
cls.asset_info = make_rotating_asset_info(
num_assets=6,
first_start=cls.first_asset_start,
frequency=cls.trading_day,
periods_between_starts=4,
asset_lifetime=8,
)
cls.all_assets = cls.asset_info.index
cls.all_dates = date_range(
start=cls.first_asset_start,
end=cls.asset_info['end_date'].max(),
freq=cls.trading_day,
)
cls.finder = AssetFinder(cls.asset_info)
cls.temp_dir = TempDirectory()
cls.temp_dir.create()
cls.writer = SyntheticDailyBarWriter(
asset_info=cls.asset_info[['start_date', 'end_date']],
calendar=cls.all_dates,
)
table = cls.writer.write(
cls.temp_dir.getpath('testdata.bcolz'),
cls.all_dates,
cls.all_assets,
)
cls.ffc_loader = USEquityPricingLoader(
BcolzDailyBarReader(table),
NullAdjustmentReader(),
)
@classmethod
def tearDownClass(cls):
cls.temp_dir.cleanup()
def test_SMA(self):
engine = SimpleFFCEngine(
self.ffc_loader,
self.env.trading_days,
self.finder,
)
dates, assets = self.all_dates, self.all_assets
window_length = 5
SMA = SimpleMovingAverage(
inputs=(USEquityPricing.close,),
window_length=window_length,
)
results = engine.factor_matrix(
{'sma': SMA},
dates[window_length],
dates[-1],
)
raw_closes = self.writer.expected_values_2d(dates, assets, 'close')
expected_sma_result = rolling_mean(
raw_closes,
window_length,
min_periods=1,
)
expected_sma_result[isnan(raw_closes)] = nan
expected_sma_result = expected_sma_result[window_length:]
sma_result = results['sma'].unstack()
assert_frame_equal(
sma_result,
DataFrame(
expected_sma_result,
index=dates[window_length:],
columns=assets,
),
)
def test_drawdown(self):
# The monotonically-increasing data produced by SyntheticDailyBarWriter
# exercises two pathological cases for MaxDrawdown. The actual
# computed results are pretty much useless (everything is either NaN)
# or zero, but verifying we correctly handle those corner cases is
# valuable.
engine = SimpleFFCEngine(
self.ffc_loader,
self.env.trading_days,
self.finder,
)
dates, assets = self.all_dates, self.all_assets
window_length = 5
drawdown = MaxDrawdown(
inputs=(USEquityPricing.close,),
window_length=window_length,
)
results = engine.factor_matrix(
{'drawdown': drawdown},
dates[window_length],
dates[-1],
)
dd_result = results['drawdown']
# We expect NaNs when the asset was undefined, otherwise 0 everywhere,
# since the input is always increasing.
expected = self.writer.expected_values_2d(dates, assets, 'close')
expected[~isnan(expected)] = 0
expected = expected[window_length:]
assert_frame_equal(
dd_result.unstack(),
DataFrame(
expected,
index=dates[window_length:],
columns=assets,
),
)
class MultiColumnLoaderTestCase(TestCase):
def setUp(self):
self.assets = [1, 2, 3]
self.dates = date_range('2014-01-01', '2014-02-01', freq='D', tz='UTC')
asset_info = make_simple_asset_info(
self.assets,
start_date=self.dates[0],
end_date=self.dates[-1],
)
self.asset_finder = AssetFinder(asset_info)
def test_engine_with_multicolumn_loader(self):
open_, close = USEquityPricing.open, USEquityPricing.close
loader = MultiColumnLoader({
open_: ConstantLoader(dates=self.dates,
assets=self.assets,
constants={open_: 1}),
close: ConstantLoader(dates=self.dates,
assets=self.assets,
constants={close: 2})
})
engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
factor = RollingSumDifference()
result = engine.factor_matrix({'f': factor},
self.dates[2],
self.dates[-1])
self.assertIsNotNone(result)
self.assertEqual({'f'}, set(result.columns))
# (close - open) * window = (1 - 2) * 3 = -3
# skipped 2 from the start, so that the window is full
check_arrays(result['f'],
Series([-3] * len(self.assets) * (len(self.dates) - 2)))
| apache-2.0 |
CforED/Machine-Learning | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
ivannz/study_notes | year_15_16/thesis/notebooks/non_gaussian_profile.py | 1 | 6063 | """None"""
import time
import os
import numpy as np
from sklearn.grid_search import ParameterGrid
from sklearn.base import clone
from sklearn.gaussian_process import GaussianProcess
from scipy.stats import norm
from joblib import Parallel, delayed
from utils.state import _save
from utils.functions_1d import f6, heaviside, pressure2
from utils.conformal import RRCM, CRR
from utils.KRR import KRR_AB
np.seterr(all="ignore")
BASE_PATH = os.path.join(".", "prof_nongauss")
if not os.path.exists(BASE_PATH):
os.mkdir(BASE_PATH)
n_jobs, verbose = -1, 0
parallel_ = Parallel(n_jobs=n_jobs, verbose=verbose)
## The levels and the random state
seeds_ = [0x4A04E61B, 0x7A5F2F22, 0x52B4A070, 0x470BB766,]
random_state = np.random.RandomState(seeds_[0])
levels = np.asanyarray([0.01, 0.05, 0.10, 0.25])[::-1]
## helpers
def _helper(y, A, B, proc=RRCM, levels=levels, parallel=None, n_jobs=1, verbose=0):
if not isinstance(parallel, Parallel):
parallel = Parallel(n_jobs=n_jobs, verbose=verbose)
regions = parallel(delayed(proc)(A[k], B[k], levels=levels)
for k in xrange(y.shape[0]))
hits_ = np.asarray(
[[np.any(((int_[:, 0] <= target) & (target <= int_[:, 1]))).astype(float)
for int_ in region]
for target, region in zip(y, regions)])
width_ = np.asarray(
[[np.sum(int_[:, 1] - int_[:, 0]) for int_ in region] for region in regions])
bounds_ = np.asarray(
[[[int_[:, 0].min(), int_[:, 1].max()] for int_ in region] for region in regions])
return hits_, width_, bounds_
## Define the grid
true_nugget = [1e-6, 1e-1,]
test_functions = [f6, heaviside, pressure2]
sizes = [50, 200,]
grid_ = ParameterGrid(dict(nugget=true_nugget,
theta0=[1e+2, "auto"]))
## Initialize
kernel = 'rbf'
gp = GaussianProcess(beta0=0, normalize=False, corr='squared_exponential')
# kernel = 'laplacian'
# gp = GaussianProcess(beta0=0, normalize=False, corr='absolute_exponential')
# Generate input
XX_test = np.linspace(0, 1, num=501).reshape((-1, 1))
test_ = np.s_[:XX_test.shape[0]]
experiment, batch_, dumps_ = list(), 1, list()
for func_ in test_functions:
for size_ in sizes:
XX_train = np.linspace(0.05, 0.95, num=size_ + 1).reshape((-1, 1))
XX = np.concatenate([XX_test, XX_train], axis=0)
for noise_ in true_nugget:
yy = func_(XX)
if yy.ndim == 1:
yy = yy.reshape((-1, 1))
if noise_ > 0:
yy += random_state.normal(size=yy.shape) * noise_
## Split the pooled sample
yy_train, yy_test = np.delete(yy, test_, axis=0), yy[test_].copy()
for i_, par_ in enumerate(grid_):
nugget_, theta0_ = par_['nugget'], par_['theta0']
key_ = func_.__name__, noise_, theta0_, nugget_, size_
print i_, key_
tick_ = time.time()
## Fit a GPR
gp_ = clone(gp)
gp_.nugget = nugget_
if isinstance(theta0_, float):
gp_.theta0 = theta0_
elif theta0_ == "auto":
gp_.thetaL, gp_.thetaU, gp_.theta0 = 1.0, 1e4, float(size_)
gp_.fit(XX_train, yy_train)
## Compute the A, B matrices
A, B, y_hat_, MM, loo_, A_loo, B_loo = \
KRR_AB(XX_train, yy_train, XX_test, forecast=True,
nugget=gp_.nugget, metric=kernel, gamma=gp_.theta_[0])
del loo_
## Construct the CKRR confidence interval: RRCM
rrcm_hits_, rrcm_width_, rrcm_bounds_ = \
_helper(yy_test, A[0], B, proc=RRCM,
levels=levels, parallel=parallel_)
## Construct the CKRR confidence interval: CCR-sided
crr_hits_, crr_width_, crr_bounds_ = \
_helper(yy_test, A[0], B, proc=CRR,
levels=levels, parallel=parallel_)
## Construct the CKRR confidence interval: RRCM
loo_rrcm_hits_, loo_rrcm_width_, loo_rrcm_bounds_ = \
_helper(yy_test, A_loo[0], B_loo, proc=RRCM,
levels=levels, parallel=parallel_)
## Construct the CKRR confidence interval: CCR-sided
loo_crr_hits_, loo_crr_width_, loo_crr_bounds_ = \
_helper(yy_test, A_loo[0], B_loo, proc=CRR,
levels=levels, parallel=parallel_)
## Construct the GPR forecast interval
z_a = norm.ppf(1 - .5 * levels)
half_width_ = np.sqrt(MM * gp_.sigma2) * z_a[np.newaxis]
bf_bounds_ = np.stack([y_hat_ - half_width_, y_hat_ + half_width_], axis=-1)
bf_width_ = bf_bounds_[..., 1] - bf_bounds_[..., 0]
bf_hits_ = ((bf_bounds_[..., 0] <= yy_test)
& (yy_test <= bf_bounds_[..., 1])).astype(float)
## Construct the GPR prediction interval
half_width_ = np.sqrt((MM - gp_.nugget) * gp_.sigma2) * z_a[np.newaxis]
bp_bounds_ = np.stack([y_hat_ - half_width_, y_hat_ + half_width_], axis=-1)
bp_width_ = bp_bounds_[..., 1] - bp_bounds_[..., 0]
bp_hits_ = ((bp_bounds_[..., 0] <= yy_test)
& (yy_test <= bp_bounds_[..., 1])).astype(float)
replication = (yy_test[:, 0], y_hat_[:, 0],
bp_bounds_, bf_bounds_,
rrcm_bounds_, crr_bounds_,
loo_rrcm_bounds_, loo_crr_bounds_,)
tock_ = time.time()
print "%0.3fsec"%(tock_-tick_,)
result_ = tuple(rep_ for rep_ in replication)
experiment.append((key_,) + result_)
basename_ = os.path.join(BASE_PATH, "prof_nongauss_%04d"%(batch_,))
dumps_.append(_save(experiment, basename_, gz=9))
| mit |
hughdbrown/QSTK-nohist | Examples/Basic/tutorial2.py | 3 | 2170 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on September, 12, 2011
@author: Tucker Balch
@contact: [email protected]
@summary: Example tutorial code.
'''
#
# imports
#
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import datetime as dt
#
# read in and slice up the data
#
data = np.loadtxt('example-data.csv',delimiter=',',skiprows=1)
pricedat = data[:,3:]
datesdat = np.int_(data[:,0:3]) # date stuff should be integer
pricesnames = ['$SPX','XOM','GOOG','GLD']
print 'first 5 rows of price data:'
print pricedat[:5,:]
print
print 'first 5 rows of dates:'
print datesdat[:5,:]
#
# Convert columns of date info into date objects
#
dates = []
for i in range(0,datesdat.shape[0]):
dates.append(dt.date(datesdat[i,0],datesdat[i,1],datesdat[i,2]))
#
# Plot the closing prices
#
plt.clf()
for i in range(0,size(pricedat[0,:])):
plt.plot(dates,pricedat[:,i])
plt.legend(pricesnames)
plt.ylabel('Adjusted Close')
plt.xlabel('Date')
savefig('adjustedclose.pdf', format='pdf')
#
# Plot the normalized data
#
plt.clf()
normdat = pricedat/pricedat[0,:]
for i in range(0,size(normdat[0,:])):
plt.plot(dates,normdat[:,i])
plt.legend(pricesnames)
plt.ylabel('Normalized Adjusted Close')
plt.xlabel('Date')
savefig("normalizedclose.pdf", format='pdf')
#
# Daily returns
#
plt.clf()
dailyrets = concatenate(([(zeros(pricedat.shape[1]))],
((pricedat[1:,:]/pricedat[0:-1,:]) - 1)),axis=0)
plt.plot(dates[0:49],dailyrets[0:49,0]) # just plot first 50 days
plt.plot(dates[0:49],dailyrets[0:49,1])
plt.axhline(y=0,color='r')
plt.legend(['$SPX','XOM'])
plt.ylabel('Daily Returns')
plt.xlabel('Date')
savefig("dailyrets.pdf", format='pdf')
#
# scatter plot $SPX v XOM
#
plt.clf()
plt.scatter(dailyrets[:,0],dailyrets[:,1],c='blue')
plt.ylabel('XOM')
plt.xlabel('$SPX')
savefig("scatterSPXvXOM.pdf", format='pdf')
#
# scatter plot $SPX v GLD
#
plt.clf()
plt.scatter(dailyrets[:,0],dailyrets[:,3],c='red')
plt.ylabel('GLD')
plt.xlabel('$SPX')
savefig("scatterSPXvGLD.pdf", format='pdf')
| bsd-3-clause |
Jacobsolawetz/trading-with-python | lib/functions.py | 76 | 11627 | # -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
    idx = idx[::-1] # in descending order
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
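# Hypothetical usage sketch for pca(), not from the original module.  pca()
# takes an n-by-p DataFrame of observations and returns the components, the
# observations projected into component space and the sorted eigenvalues; the
# correlated two-column frame below is invented.
def _example_pca_usage():
    "Minimal sketch: run pca() on correlated synthetic data."
    rnd = np.random.RandomState(0)
    x = rnd.normal(size=500)
    df = DataFrame({'a': x, 'b': 0.8 * x + 0.2 * rnd.normal(size=500)})
    coeff, transform, latent = pca(df)
    # with strongly correlated columns the first eigenvalue dominates
    return latent[0] / latent.sum()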
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
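# Hypothetical sketch showing how pos2pnl() is meant to be driven; the prices
# and positions are invented.  The function turns a price series and a
# position series (shares held at each bar) into a portfolio frame with
# 'cash', 'stock', 'tc' and 'total' columns.
def _example_pos2pnl_usage():
    "Minimal sketch: pnl of buying 100 shares and selling two bars later."
    price = Series([10.0, 11.0, 12.0, 11.5])
    position = Series([0, 100, 100, 0])
    port = pos2pnl(price, position, ibTransactionCost=True)
    return port['total']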
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
estimate stock Y vs stock X beta using iterative linear
regression. Outliers outside 3 sigma boundary are filtered out
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
# filter high values
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
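# Hypothetical sketch, not part of the original module.  With algo='standard',
# estimateBeta() returns cov(log-returns of Y, log-returns of X) divided by
# var(log-returns of X); the synthetic prices below are built so the true
# beta is 1.5.
def _example_beta_usage():
    "Minimal sketch: recover a known beta from synthetic prices."
    rnd = np.random.RandomState(0)
    market_ret = rnd.normal(0, 0.01, size=1000)
    priceX = Series(100 * np.exp(np.cumsum(market_ret)))
    priceY = Series(100 * np.exp(np.cumsum(1.5 * market_ret)))
    return estimateBeta(priceY, priceX, algo='standard')  # close to 1.5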
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
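# Hypothetical sketch, not part of the original module.  estimateVolatility()
# expects a DataFrame with 'open', 'high', 'low' and 'close' columns and
# returns an annualised volatility estimate in percent; the random-walk OHLC
# data below is invented.
def _example_volatility_usage():
    "Minimal sketch: Yang-Zhang vs close-close volatility on synthetic OHLC."
    rnd = np.random.RandomState(0)
    close = 100 * np.exp(np.cumsum(rnd.normal(0, 0.01, size=250)))
    ohlc = DataFrame({'open': close * (1 + rnd.normal(0, 0.002, size=250)),
                      'high': close * 1.01,
                      'low': close * 0.99,
                      'close': close})
    vol_yz = estimateVolatility(ohlc, N=20, algo='YangZhang')
    vol_cc = estimateVolatility(ohlc, N=20, algo='CC')
    return vol_yz.dropna().iloc[-1], vol_cc.dropna().iloc[-1]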
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
drawdown : vector of drawdwon values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
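# Hypothetical sketch, not part of the original module.  drawdown() returns
# two series aligned with the input: the running drawdown in the curve's own
# units and the number of bars spent under water; the toy equity curve below
# is invented.
def _example_drawdown_usage():
    "Minimal sketch: max drawdown and duration of a small equity curve."
    curve = Series([100.0, 110.0, 105.0, 120.0, 90.0, 95.0, 125.0])
    dd, duration = drawdown(curve)
    # deepest drawdown is 120 - 90 = 30; the longest under-water stretch is 2
    return dd.max(), duration.max()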
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df) | bsd-3-clause |
jayhetee/pandashells | pandashells/lib/module_checker_lib.py | 7 | 2195 | #! /usr/bin/env python
import sys
import importlib
from pandashells.lib import config_lib
# define the default error message to show when a module can't be found
HEADER = "\n\nThis tool requires packages that have not been installed.\n"
HEADER += "Below is a list of missing packages along with commands for\n"
HEADER += "installing them.\n\n"
# define a dict to map a module name to its install command
CMD_DICT = {
'dateutil': 'pip install dateutil',
'gatspy': 'pip install gatspy',
'matplotlib': 'conda install matplotlib',
'mpld3': 'pip install mpld3',
'numpy': 'conda install numpy',
'pandas': 'conda install pandas',
'pylab': 'conda install matplotlib',
'requests': 'pip install requests',
'scipy': 'conda install scipy',
'seaborn': 'conda install seaborn',
'statsmodels': 'conda install statsmodels',
}
def check_for_modules(module_list):
# make sure module_list only contains recognized modules
unnamed_modules = set(module_list) - set(CMD_DICT.keys())
unnamed_modules = unnamed_modules - {'datetime', 're'}
if unnamed_modules:
msg = '\n\nThese modules unrecognized by check_for_modules(): '
msg += '{}\n'.format(unnamed_modules)
raise ValueError(msg)
# try using configured backend ignoring errors so they'll be caught later
if set(module_list).intersection({'matplotlib', 'pylab', 'seaborn'}):
CONFIG = config_lib.get_config()
try:
import matplotlib
if matplotlib.get_backend() != CONFIG['plot_backend']:
matplotlib.use(CONFIG['plot_backend'])
except ImportError:
pass
# initialize an error message
msg = ''
    # try importing all the required modules
for module in sorted(module_list):
try:
importlib.import_module(module)
except ImportError:
# add to error message for each bad module
msg = msg if msg else HEADER
msg += '-' * 60 + '\n'
msg += "Missing module '{}'. To install use: \n".format(module)
msg += " {}\n\n".format(CMD_DICT[module])
sys.stdout.write(msg + '\n')
raise
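# Hypothetical usage sketch, not part of the original module.  A pandashells
# tool calls check_for_modules() with the optional dependencies it needs
# before importing them: unknown names raise ValueError, and missing packages
# print install hints before re-raising the ImportError.
def _example_check_usage():
    "Minimal sketch (never called here): guard optional imports in a tool."
    check_for_modules(['pandas', 'matplotlib'])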
| bsd-2-clause |
JeanKossaifi/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
igormunkin/CMC-CTMM | task04_particles/gui.py | 1 | 9040 | import sys
import re
import time
from controller import ParticleController, ParticlePlot
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QIcon, QFont, QColor
from PyQt5.QtWidgets import (
QApplication, QTextEdit, QLabel, QPushButton, QMessageBox, QDesktopWidget,
QMainWindow, QSlider, QToolTip, QSizePolicy, QComboBox
)
class ParticleGUI(QMainWindow, ParticleController):
handlers = { Qt.Key_Escape: lambda self: self.close() }
qlwidth = 20
qlheight = 24
qtewidth = 55
qteheight = 24
def __init__(self):
        super().__init__()
QToolTip.setFont(QFont('Arial', 10))
self.__init_main_window()
self.__init_labels(left = 30, top = 400)
self.__init_textboxes(left = 50, top = 400)
self.__init_slider_particle_mass()
self.__init_combobox_verlet()
self.__init_button_add_particle()
self.__init_button_stat()
self.__init_plot()
self.show()
def __init_main_window(self):
""" Init function for application's main window """
self.resize(700, 400)
self.setWindowTitle('Particles')
frame = self.frameGeometry()
frame.moveCenter(QDesktopWidget().availableGeometry().center())
self.move(frame.topLeft())
def __init_labels(self, left, top):
s_lbl = QLabel("Velocity", self)
s_lbl.setGeometry(left + __class__.qlwidth, top + __class__.qlheight,
__class__.qtewidth, __class__.qlheight)
u_lbl = QLabel(" u: ", self)
u_lbl.setGeometry(left, top + 2 * __class__.qlheight, __class__.qlwidth,
__class__.qlheight)
v_lbl = QLabel(" v: ", self)
v_lbl.setGeometry(left, top + 3.25 * __class__.qlheight, __class__.qlwidth,
__class__.qlheight)
p_lbl = QLabel("Position", self)
p_lbl.setGeometry(left + 2 * __class__.qlwidth + __class__.qtewidth,
top + __class__.qlheight, __class__.qtewidth, __class__.qlheight)
x_lbl = QLabel(" x: ", self)
x_lbl.setGeometry(left + __class__.qlwidth + __class__.qtewidth,
top + 2 * __class__.qlheight, __class__.qlwidth, __class__.qlheight)
y_lbl = QLabel(" y: ", self)
y_lbl.setGeometry(left + __class__.qlwidth + __class__.qtewidth,
top + 3.25 * __class__.qlheight, __class__.qlwidth, __class__.qlheight)
e_lbl = QLabel("Optional", self)
e_lbl.setGeometry(left + 3 * __class__.qlwidth + 2 * __class__.qtewidth,
top + __class__.qlheight, __class__.qtewidth, __class__.qlheight)
l_lbl = QLabel(" lifetime: ", self)
l_lbl.setGeometry(left + 3 * __class__.qlwidth + 2 * __class__.qtewidth,
top + 2 * __class__.qlheight, __class__.qtewidth, __class__.qlheight)
m_lbl = QLabel(" mass: ", self)
m_lbl.setGeometry(left + 3 * __class__.qlwidth + 2 * __class__.qtewidth,
top + 3.25 * __class__.qlheight, __class__.qtewidth, __class__.qlheight)
o_lbl = QLabel(" Methods: ", self)
o_lbl.setGeometry(left + 5 * __class__.qlwidth + 4 * __class__.qtewidth,
top + 3.25 * __class__.qlheight, 4 * __class__.qlwidth, __class__.qlheight)
def __init_textboxes(self, left, top):
self.u_tbx = QTextEdit(str(self.velocity["u"]), self)
self.u_tbx.setGeometry(left, top + 2 * __class__.qlheight,
__class__.qtewidth, __class__.qteheight)
self.u_tbx.setTabChangesFocus(True)
self.u_tbx.textChanged.connect(self.__u_tbx_changed)
self.v_tbx = QTextEdit(str(self.velocity["v"]), self)
self.v_tbx.setGeometry(left, top + 3.25 * __class__.qlheight,
__class__.qtewidth, __class__.qteheight)
self.v_tbx.setTabChangesFocus(True)
self.v_tbx.textChanged.connect(self.__v_tbx_changed)
self.x_tbx = QTextEdit(str(self.position["x"]), self)
self.x_tbx.setGeometry(left + __class__.qlwidth + __class__.qtewidth,
top + 2 * __class__.qteheight, __class__.qtewidth, __class__.qteheight)
self.x_tbx.setTabChangesFocus(True)
self.x_tbx.textChanged.connect(self.__x_tbx_changed)
self.y_tbx = QTextEdit(str(self.position["y"]), self)
self.y_tbx.setGeometry(left + __class__.qlwidth + __class__.qtewidth,
top + 3.25 * __class__.qteheight, __class__.qtewidth, __class__.qteheight)
self.y_tbx.setTabChangesFocus(True)
self.y_tbx.textChanged.connect(self.__y_tbx_changed)
self.l_tbx = QTextEdit(str(self.lifetime), self)
self.l_tbx.setGeometry(left + 5 * __class__.qlwidth + 2 * __class__.qtewidth,
top + 2 * __class__.qteheight, __class__.qtewidth, __class__.qteheight)
self.l_tbx.setTabChangesFocus(True)
self.l_tbx.textChanged.connect(self.__l_tbx_changed)
def __init_slider_particle_mass(self):
"""Init function for slider that changes mass of the particle that will
be created next"""
sld = QSlider(Qt.Horizontal, self)
sld.setFocusPolicy(Qt.NoFocus)
sld.setGeometry(245, 475, 100, 30)
sld.setMinimum(1)
sld.setMaximum(1000)
sld.setValue(self.mass / 5000)
sld.valueChanged[int].connect(self.__sld_changed)
def __init_combobox_verlet(self):
""" Init combobox for selecting different Verlet implementation """
self.cmb = QComboBox(self);
self.cmb.setObjectName("cmb")
self.cmb.setGeometry(420, 475, 250, 30)
self.cmb.addItems(self.methods)
self.cmb.currentIndexChanged.connect(self.__cmb_changed)
def __init_button_add_particle(self):
""" Init function for button that adds one particle to the plot """
self.btn = QPushButton('Add particle', self)
self.btn.setToolTip('Press this button to <b>Add a new particle</b>')
self.btn.setGeometry(360, 430, 150, 40)
self.btn.clicked.connect(self._ParticleController__add_particle)
self.btn.setDisabled(False)
def __init_plot(self):
""" Init function for matplotlib plot """
self.plot = ParticlePlot(self, 13.5, 8, 50, QSizePolicy.Fixed)
self.plot.move(0, 0)
self.timer = QTimer(self)
self.timer.timeout.connect(self.__draw_plot)
self.timer.start(42)
def __init_button_stat(self):
""" Init function for button that shows benchmarks stats """
self.sbtn = QPushButton('Yo, stats', self)
self.sbtn.setToolTip('Press this button to <b>Show stats</b>')
self.sbtn.setGeometry(520, 430, 150, 40)
self.sbtn.clicked.connect(self.__sbtn_clicked)
def __draw_plot(self):
""" Function for particles rendering """
self.plot.update_plot(self.particles, self.updaters[self.method])
self.particles = list(filter(lambda p: p.time < p.death, self.particles))
@staticmethod
def __validate(textedit, pattern):
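# Color the text of `textedit` red when its contents do not match `pattern`
# (e.g. r"^-?\d+(?:\.\d+)?$" for signed decimals), black when they do, and
# return whether the input is valid.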
if re.match(pattern, textedit.toPlainText()):
textedit.setStyleSheet("QTextEdit {color: black}")
return True
else:
textedit.setStyleSheet("QTextEdit {color: red}")
return False
def __u_tbx_changed(self):
if __class__.__validate(self.u_tbx, r"^-?\d+(?:\.\d+)?$"):
self.velocity["u"] = float(self.u_tbx.toPlainText())
def __v_tbx_changed(self):
if __class__.__validate(self.v_tbx, r"^-?\d+(?:\.\d+)?$"):
self.velocity["v"] = float(self.v_tbx.toPlainText())
def __x_tbx_changed(self):
if __class__.__validate(self.x_tbx, r"^-?\d+(?:\.\d+)?$"):
self.position["x"] = float(self.x_tbx.toPlainText())
def __y_tbx_changed(self):
if __class__.__validate(self.y_tbx, r"^-?\d+(?:\.\d+)?$"):
self.position["y"] = float(self.y_tbx.toPlainText())
def __l_tbx_changed(self):
if __class__.__validate(self.l_tbx, r"^\d+?$"):
self.lifetime = float(self.l_tbx.toPlainText())
def __sld_changed(self, value):
self.mass = value * 50000
def __cmb_changed(self, value):
self.method = value
print("Current method: ", self.cmb.currentText())
def __sbtn_clicked(self):
print("============== Stats ============")
print("Number of particles: {}".format(len(self.particles)))
for i in range(3):
start = time.time()
self.updaters[i](self.particles)
end = time.time()
print("{} time = {}".format(self.updaters[i].__name__, end - start))
print("============== Stats ============")
def keyPressEvent(self, e):
if e.key() in __class__.handlers.keys():
__class__.handlers[e.key()](self)
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', "Are you sure to quit?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
event.accept() if reply == QMessageBox.Yes else event.ignore()
if __name__ == '__main__':
app = QApplication(sys.argv)
gui = ParticleGUI()
sys.exit(app.exec())
| mit |
luo66/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
TheChymera/LabbookDB | labbookdb/db/add.py | 1 | 13957 | #!/usr/bin/python
from __future__ import print_function
import argh
import json
import numpy
import pandas as pd
from sqlalchemy import create_engine, literal, update, insert
from sqlalchemy import inspect
from os import path
from sqlalchemy.orm import sessionmaker
import sqlalchemy
from .common_classes import *
from .query import ALLOWED_CLASSES
def load_session(db_path):
"""Load and return a new SQLalchemy session and engine.
Parameters
----------
db_path : str
Path to desired database location, can be relative or use tilde to specify the user $HOME.
Returns
-------
session : sqlalchemy.orm.session.Session
Session instance.
engine : sqlalchemy.engine.Engine
Engine instance.
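Examples
--------
A minimal sketch; the database file name below is hypothetical:
>>> from labbookdb.db import add
>>> session, engine = add.load_session("lala.db")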
"""
db_path = "sqlite:///" + path.abspath(path.expanduser(db_path))
engine = create_engine(db_path, echo=False)
# it is very important that `autoflush == False`; otherwise, if "treatments" or "measurements" entries precede "external_ids", the latter will insert a null on the animal_id column
Session = sessionmaker(bind=engine, autoflush=False)
session = Session()
Base.metadata.create_all(engine)
return session, engine
def add_to_db(session, engine, myobject):
"""Add an object to session and return the .id attribute value.
Parameters
----------
session : sqlalchemy.orm.session.Session
Session instance, as created with labbookdb.db.add.load_session().
engine : sqlalchemy.engine.Engine
Engine instance corresponding to the Session instance under session, as created with labbookdb.db.add.load_session().
myobject : object
LabbookDB object with SQLAlchemy-compatible attributes (e.g. as found under labbookdb.db.common_classes).
Returns
-------
object_id : int
Value of myobject.id attribute
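Examples
--------
A hedged sketch; `Animal` is assumed to be one of the mapped classes from labbookdb.db.common_classes, and the bare instance is for illustration only:
>>> from labbookdb.db import add
>>> from labbookdb.db.common_classes import Animal # doctest: +SKIP
>>> session, engine = add.load_session("lala.db") # doctest: +SKIP
>>> new_id = add.add_to_db(session, engine, Animal()) # doctest: +SKIP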
"""
session.add(myobject)
try:
session.commit()
except sqlalchemy.exc.IntegrityError:
print("Please make sure this was not a double entry:", myobject)
object_id=myobject.id
return object_id
def instructions(kind):
"""Print helpful instructions for common error-causing input issues
Parameters
----------
kind : {"table_identifier",}
Shorthand for the instruction message to be printed.
"""
if kind == "table_identifier":
print("Make sure you have entered the filter value correctly. This value is supposed to refer to the id column of another table and needs to be specified as \'table_identifier\'.\'field_by_which_to_filter\'.\'target_value\'")
def get_related_ids(session, engine, parameters):
"""Return the .id attribute value for existing entries matched by a string following the LabbookDB-syntax.
Parameters
----------
session : sqlalchemy.orm.session.Session
Session instance, as created with labbookdb.db.add.load_session().
engine : sqlalchemy.engine.Engine
Engine instance corresponding to the Session instance under session, as created with labbookdb.db.add.load_session().
parameters : str
LabbookDB-syntax string specifying an existing entry.
Returns
-------
ids : list of int
.id attribute values for the entries matched by the LabbookDB-syntax string.
sql_query : sqlalchemy.orm.query.Query
Query corresponding to the LabbookDB-syntax string
Examples
--------
>>> from labbookdb.db import add
>>> session, engine = add.load_session("lala.db")
>>> add.get_related_ids(s,e,"Animal:external_ids.AnimalExternalIdentifier:database.ETH/AIC/cdb&#&identifier.275511")
BaseException: No entry was found with a value of "275511" on the "identifier" column of the "AnimalExternalIdentifier" CATEGORY, in the database.
>>> add.get_related_ids(s,e,"Animal:external_ids.AnimalExternalIdentifier:database.ETH/AIC/cdb&&identifier.275511")
BaseException: No entry was found with a value of "ETH/AIC/cdb" on the "database" column of the "AnimalExternalIdentifier" CATEGORY, in the database.
Notes
-----
Recursivity :
This function calls itself recursively in order to get the .id attribute values of related entries (and related entries of related entries, etc.) specified in the LabbookDB-syntax string.
Multiple constraints are separated by double ampersands, which may enclose none, one, or two hashtags to specify at which level the constraints should be applied.
One hashtag is removed on each recursion step, and the constraint is only evaluated when there are no hashtags left.
"Animal:external_ids.AnimalExternalIdentifier:database.ETH/AIC/cdb&#&identifier.275511" will look for both the database and the identifier attributes in the AnimalExternalIdentifier class, while "Animal:external_ids.AnimalExternalIdentifier:database.ETH/AIC/cdb&&identifier.275511" will look for the database attribute on the AnimalExternalIdentifier class, and for the identifier attribute on the Animal class.
"""
category = parameters.split(":",1)[0]
sql_query=session.query(ALLOWED_CLASSES[category])
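# Illustration of the level syntax (hypothetical values): with plain "&&", e.g.
# "Animal:external_ids.AnimalExternalIdentifier:database.X&&identifier.Y", both
# constraints are split off below, at the Animal level; with "&#&" the second
# constraint is not split here, loses one hashtag, and is only applied inside
# the AnimalExternalIdentifier recursion.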
for field_value in parameters.split(":",1)[1].split("&&"):
field, value = field_value.split(".",1)
if "&#&" in value or "&##&" in value:
value=value.replace("&#&", "&&")
value=value.replace("&##&", "&#&")
if ":" in value:
values, objects = get_related_ids(session, engine, value)
for value in values:
value=int(value) # the value is returned as a numpy object
if field[-4:] == "date": # support for date entry matching (the values have to be passed as strings but matched as datetime)
value = datetime.datetime(*[int(i) for i in value.split(",")])
# we are generally looking to match values, but sometimes the parent table does not have an id column, but only a relationship column (e.g. in one to many relationships)
try:
sql_query = sql_query.filter(getattr(ALLOWED_CLASSES[category], field)==value)
except sqlalchemy.exc.InvalidRequestError:
sql_query = sql_query.filter(getattr(ALLOWED_CLASSES[category], field).contains(*[i for i in objects]))
else:
if field[-4:] == "date": # support for date entry matching (the values have to be passed as strings but matched as datetime)
value = datetime.datetime(*[int(i) for i in value.split(",")])
sql_query = sql_query.filter(getattr(ALLOWED_CLASSES[category], field)==value)
mystring = sql_query.with_labels().statement
mydf = pd.read_sql_query(mystring,engine)
category_tablename = ALLOWED_CLASSES[category].__table__.name
related_table_ids = mydf[category_tablename+"_id"]
ids = list(related_table_ids)
ids = [int(i) for i in ids]
if ids == []:
raise BaseException("No entry was found with a value of \""+str(value)+"\" on the \""+field+"\" column of the \""+category+"\" CATEGORY, in the database.")
session.close()
engine.dispose()
return ids, sql_query
def append_parameter(db_path, entry_identification, parameters):
"""Assigns a value to a given parameter of a given entry.
Parameters
----------
db_path : str
A string specifying the database path
entry_identification : str
A LabbookDB syntax string specifying an instance of an object for which to update a parameter.
Example strings: "Animal:external_ids.AnimalExternalIdentifier:database.ETH/AIC&#&identifier.5701" , "Cage:id.14"
parameters : str or dict
A LabbookDB-style dictionary (or JSON interpretable as dictionary), where keys are strings giving the names of attributes of the class selected by entry_identification, and values are either the values to assign (verbatim: string, int, or float) or LabbookDB-syntax strings specifying a related entry, or a list of LabbookDB-syntax strings specifying related entries, or a list of LabbookDB-style dictionaries specifying new entries to be created and linked.
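Examples
--------
A hedged sketch; the "location" attribute is an assumption about the Cage schema and may not exist in your database:
>>> from labbookdb.db import add
>>> add.append_parameter("lala.db", "Cage:id.14", {"location": "A2"}) # doctest: +SKIP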
"""
if isinstance(parameters, str):
parameters = json.loads(parameters)
session, engine = load_session(db_path)
entry_class = ALLOWED_CLASSES[entry_identification.split(":")[0]]
my_id = get_related_ids(session, engine, entry_identification)[0][0]
myobject = session.query(entry_class).filter(entry_class.id == my_id)[0]
for parameter_key in parameters:
parameter_expression = parameters[parameter_key]
if isinstance(parameter_expression, (str, int, float)):
if isinstance(parameter_expression, str) and ":" in parameter_expression and "." in parameter_expression:
related_entry_ids, _ = get_related_ids(session, engine, parameter_expression)
related_entry_class = ALLOWED_CLASSES[parameter_expression.split(":")[0]]
for related_entry_id in related_entry_ids:
related_entry = session.query(related_entry_class).filter(related_entry_class.id == related_entry_id)[0]
setattr(myobject, parameter_key, related_entry)
else:
if parameter_key[-4:] == "date":
parameter_expression = datetime.datetime(*[int(i) for i in parameter_expression.split(",")])
setattr(myobject, parameter_key, parameter_expression)
else:
set_attribute = getattr(myobject, parameter_key)
for parameter_expression_entry in parameter_expression:
if isinstance(parameter_expression_entry, dict):
new_entry, _ = add_generic(db_path, parameter_expression_entry, session=session, engine=engine)
set_attribute.append(new_entry)
elif isinstance(parameter_expression_entry, str):
related_entry_ids, _ = get_related_ids(session, engine, parameter_expression_entry)
related_entry_class = ALLOWED_CLASSES[parameter_expression_entry.split(":")[0]]
for related_entry_id in related_entry_ids:
related_entry = session.query(related_entry_class).filter(related_entry_class.id == related_entry_id)[0]
set_attribute.append(related_entry)
commit_and_close(session, engine)
def add_generic(db_path, parameters, session=None, engine=None):
"""Adds new entries based on a LabbookDB-syntax parameter dictionary.
Parameters
----------
db_path : str
Path to database to open if session and engine parameters are not already passed, can be relative or use tilde to specify the user $HOME.
parameters : str or dict
A LabbookDB-style dictionary (or JSON interpretable as dictionary), where keys are "CATEGORY" and other strings specifying the attribute names for the object to be created, and values are the class name (for "CATEGORY") and either the values to assign (verbatim: string, int, or float) or LabbookDB-syntax strings specifying a related entry, or a list of LabbookDB-syntax strings specifying related entries, or a list of LabbookDB-style dictionaries specifying new entries to be created and linked.
session : sqlalchemy.orm.session.Session, optional
Session instance, as created with labbookdb.db.add.load_session().
engine : sqlalchemy.engine.Engine, optional
Engine instance corresponding to the Session instance under session, as created with labbookdb.db.add.load_session().
Returns
-------
myobject : object
LabbookDB object with SQLAlchemy-compatible attributes (e.g. as found under labbookdb.db.common_classes).
object_id : int
Value of myobject.id attribute.
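Examples
--------
A hedged sketch; the "Animal" category and its "sex" attribute are assumptions about the schema in common_classes:
>>> from labbookdb.db import add
>>> myobject, object_id = add.add_generic("lala.db", {"CATEGORY": "Animal", "sex": "m"}) # doctest: +SKIP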
"""
if not (session and engine) :
session, engine = load_session(db_path)
close = True
else:
close = False
if isinstance(parameters, str):
parameters = json.loads(parameters)
category_class = ALLOWED_CLASSES[parameters["CATEGORY"]]
if list(parameters.keys()) == ["CATEGORY"]:
attributes = dir(category_class())
filtered_attributes = [i for i in attributes if i[0] != "_"]
print("You can list the following keys as part of your parameters: " + ", ".join(filtered_attributes))
parameters.pop("CATEGORY", None)
myobject = category_class()
columns = inspect(myobject).mapper.column_attrs
relationships = inspect(myobject).mapper.relationships
all_attributes = [attr.key for attr in columns+relationships]
for key, _ in sorted(parameters.items()):
if key not in all_attributes:
raise ValueError("'"+myobject.__class__.__name__+"' object does not support '"+key+"' attribute. Acceptable attributes are: "+" ,".join(all_attributes)+".")
if key[-4:] == "date":
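# date values arrive as comma-separated integers, e.g. "2017,5,1" -> datetime.datetime(2017, 5, 1)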
parameters[key] = datetime.datetime(*[int(i) for i in parameters[key].split(",")])
if key[-3:] == "_id" and not isinstance(parameters[key], int):
try:
input_values, _ = get_related_ids(session, engine, parameters[key])
except ValueError:
instructions("table_identifier")
for input_value in input_values:
input_value = int(input_value)
print("Setting", myobject.__class__.__name__+"'s",key,"attribute to",input_value)
setattr(myobject, key, input_value)
#this triggers on-the-fly related-entry creation:
elif isinstance(parameters[key], list):
related_entries=[]
for related_entry in parameters[key]:
if isinstance(related_entry, dict):
related_entry, _ = add_generic(db_path, related_entry, session=session, engine=engine)
related_entries.append(related_entry)
elif isinstance(related_entry, str):
my_id = get_related_ids(session, engine, related_entry)[0][0]
entry_class = ALLOWED_CLASSES[related_entry.split(":")[0]]
related_entry = session.query(entry_class).\
filter(entry_class.id == my_id).all()[0]
related_entries.append(related_entry)
session.add(myobject) # voodoo (imho) fix for the weird errors about myobject not being attached to a Session
print("Setting", myobject.__class__.__name__+"'s",key,"attribute to",related_entries)
setattr(myobject, key, related_entries)
else:
print("Setting", myobject.__class__.__name__+"'s",key,"attribute to",parameters[key])
setattr(myobject, key, parameters[key])
object_id = add_to_db(session, engine, myobject)
if close:
session.close()
engine.dispose()
return myobject, object_id
def commit_and_close(session, engine):
"""Commit and close session and dispose of engine.
Nonfatal for sqlalchemy.exc.IntegrityError with print notification.
Parameters
----------
session : sqlalchemy.orm.session.Session, optional
Session instance, as created with labbookdb.db.add.load_session().
engine : sqlalchemy.engine.Engine, optional
Engine instance corresponding to the Session instance under session, as created with labbookdb.db.add.load_session().
"""
try:
session.commit()
except sqlalchemy.exc.IntegrityError:
print("Please make sure this was not a double entry.")
session.close()
engine.dispose()
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter10/fig_matchedfilt_chirp.py | 3 | 4184 | """
Matched Filter Chirp Search
---------------------------
Figure 10.26
A matched filter search for a chirp signal in time series data. A simulated
data set generated from a model of the form y = b0+Asin[omega t + beta t^2],
with homoscedastic Gaussian errors with sigma = 2, is shown in the top-right
panel. The posterior pdf for the four model parameters is determined using
MCMC and shown in the other panels.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
# Hack to fix import issue in older versions of pymc
import scipy
import scipy.misc
scipy.derivative = scipy.misc.derivative
import pymc
from astroML.plotting.mcmc import plot_mcmc
from astroML.decorators import pickle_results
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#----------------------------------------------------------------------
# Set up toy dataset
def chirp(t, b0, beta, A, omega):
return b0 + A * np.sin(omega * t + beta * t * t)
np.random.seed(0)
N = 100
b0_true = 10
A_true = 5
beta_true = 0.01
omega_true = 0.1
sigma = 2.0
t = 100 * np.random.random(N)
y_true = chirp(t, b0_true, beta_true, A_true, omega_true)
y_obs = np.random.normal(y_true, sigma)
t_fit = np.linspace(0, 100, 1000)
y_fit = chirp(t_fit, b0_true, beta_true, A_true, omega_true)
i = np.argsort(t)
#----------------------------------------------------------------------
# Set up MCMC sampling
b0 = pymc.Uniform('b0', 0, 50, value=50 * np.random.random())
A = pymc.Uniform('A', 0, 50, value=50 * np.random.random())
log_beta = pymc.Uniform('log_beta', -10, 10, value=-4.6)
log_omega = pymc.Uniform('log_omega', -10, 10, value=-2.3)
# uniform prior on log(beta)
@pymc.deterministic
def beta(log_beta=log_beta):
return np.exp(log_beta)
# uniform prior on log(omega)
@pymc.deterministic
def omega(log_omega=log_omega):
return np.exp(log_omega)
@pymc.deterministic
def y_model(t=t, b0=b0, A=A, beta=beta, omega=omega):
return chirp(t, b0, beta, A, omega)
y = pymc.Normal('y', mu=y_model, tau=sigma ** -2, observed=True, value=y_obs)
model = dict(b0=b0, A=A,
log_beta=log_beta, beta=beta,
log_omega=log_omega, omega=omega,
y_model=y_model, y=y)
#----------------------------------------------------------------------
# Run the MCMC sampling (saving results to a pickle)
@pickle_results('matchedfilt_chirp.pkl')
def compute_MCMC_results(niter=20000, burn=2000):
S = pymc.MCMC(model)
S.sample(iter=niter, burn=burn)
traces = [S.trace(s)[:] for s in ['b0', 'A', 'omega', 'beta']]
M = pymc.MAP(model)
M.fit()
fit_vals = (M.b0.value, M.beta.value, M.A.value, M.omega.value)
return traces, fit_vals
traces, fit_vals = compute_MCMC_results()
labels = ['$b_0$', '$A$', r'$\omega$', r'$\beta$']
limits = [(9.5, 11.3), (3.6, 6.4), (0.065, 0.115), (0.00975, 0.01045)]
true = [b0_true, A_true, omega_true, beta_true]
#----------------------------------------------------------------------
# Find the Maximum a posteriori values
fig = plt.figure(figsize=(5, 5))
ax = plt.axes([0.5, 0.7, 0.45, 0.25])
t_fit = np.linspace(0, 100, 1001)
y_fit = chirp(t_fit, *fit_vals)
plt.scatter(t, y_obs, s=9, lw=0, c='k')
plt.plot(t_fit, y_fit, '-k')
plt.xlim(0, 100)
plt.xlabel('$t$')
plt.ylabel(r'$h_{\rm obs}$')
# This function plots multiple panels with the traces
plot_mcmc(traces, labels=labels, limits=limits, true_values=true, fig=fig,
bins=30, bounds=[0.12, 0.08, 0.95, 0.91], colors='k')
plt.show()
| bsd-2-clause |
medit74/DeepLearning | MyPythonDeepLearning/Convolution/train_convnet.py | 1 | 1494 | # coding: utf-8
import sys, os
sys.path.append(os.pardir) # allow importing files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from Common.mnist import load_mnist
from Convolution.simple_convnet import SimpleConvNet
from Common.trainer import Trainer
# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
# Reduce the dataset if training takes too long.
#x_train, t_train = x_train[:5000], t_train[:5000]
#x_test, t_test = x_test[:1000], t_test[:1000]
max_epochs = 20
network = SimpleConvNet(input_dim=(1,28,28),
conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
hidden_size=100, output_size=10, weight_init_std=0.01)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
epochs=max_epochs, mini_batch_size=100,
optimizer='Adam', optimizer_param={'lr': 0.001},
evaluate_sample_num_per_epoch=1000)
trainer.train()
# Save the parameters
network.save_params("params.pkl")
print("Saved Network Parameters!")
# Draw the graph
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
| apache-2.0 |
walterreade/scikit-learn | sklearn/mixture/gmm.py | 13 | 30702 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
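Examples
--------
A minimal shape check with illustrative values:
>>> import numpy as np
>>> X = np.zeros((2, 3))
>>> means = np.zeros((4, 3))
>>> covars = np.ones((4, 3))
>>> log_multivariate_normal_density(X, means, covars, 'diag').shape
(2, 4)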
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
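Examples
--------
A minimal shape check with illustrative values:
>>> import numpy as np
>>> sample_gaussian(np.zeros(2), np.ones(2), 'diag', n_samples=5).shape
(2, 5)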
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model.
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) # doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance."""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""Perform the Mstep of the EM algorithm and return the cluster weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model."""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model."""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for diagonal cases."""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Perform the covariance M step for spherical cases."""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for full cases."""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for tied cases."""
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
walterreade/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
btabibian/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 11 | 25443 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
# P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
    # skip_num_points should make it such that the Barnes-Hut gradient
    # is not calculated for indices below skip_num_points.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
                              grad_bh, 0.5, 2, 1,
                              skip_num_points=skip_num_points)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
    # Make sure translating between 1D and N-D indices is preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
@skip_if_32bit
def test_n_iter_without_progress():
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=-1, verbose=2,
random_state=1, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert_in("did not make any progress during the "
"last -1 episodes. Finished.", out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
        # After the 'Finished' line only an old gradient norm value is
        # repeated, which we do not need to store
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '')
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
# The gradient norm can be smaller than min_grad_norm at most once,
# because in the moment it becomes smaller the optimization stops
assert_less_equal(n_smaller_gradient_norms, 1)
def test_accessible_kl_divergence():
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in out.split('\n')[::-1]:
if 'Iteration' in line:
_, _, error = line.partition('error = ')
if error:
error, _, _ = error.partition(',')
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/frame/test_to_csv.py | 7 | 44295 | # -*- coding: utf-8 -*-
from __future__ import print_function
import csv
import pytest
from numpy import nan
import numpy as np
from pandas.compat import (lmap, range, lrange, StringIO, u)
from pandas.errors import ParserError
from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp,
date_range, read_csv, compat, to_datetime)
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
ensure_clean, slow,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameToCSV(TestData):
def test_to_csv_from_csv1(self):
with ensure_clean('__tmp_to_csv_from_csv1__') as path:
self.frame['A'][:5] = nan
self.frame.to_csv(path)
self.frame.to_csv(path, columns=['A', 'B'])
self.frame.to_csv(path, header=False)
self.frame.to_csv(path, index=False)
# test roundtrip
self.tsframe.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(self.tsframe, recons)
self.tsframe.to_csv(path, index_label='index')
recons = DataFrame.from_csv(path, index_col=None)
assert(len(recons.columns) == len(self.tsframe.columns) + 1)
# no index
self.tsframe.to_csv(path, index=False)
recons = DataFrame.from_csv(path, index_col=None)
assert_almost_equal(self.tsframe.values, recons.values)
# corner case
dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
's2': Series(lrange(2), lrange(2))})
dm.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self):
with ensure_clean('__tmp_to_csv_from_csv2__') as path:
# duplicate index
df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
columns=['x', 'y', 'z'])
df.to_csv(path)
result = DataFrame.from_csv(path)
assert_frame_equal(result, df)
midx = MultiIndex.from_tuples(
[('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx,
columns=['x', 'y', 'z'])
df.to_csv(path)
result = DataFrame.from_csv(path, index_col=[0, 1, 2],
parse_dates=False)
            # TODO: from_csv names the index ['Unnamed: 1', 'Unnamed: 2'];
            # should it?
assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_csv(path, header=col_aliases)
rs = DataFrame.from_csv(path)
xp = self.frame2.copy()
xp.columns = col_aliases
assert_frame_equal(xp, rs)
pytest.raises(ValueError, self.frame2.to_csv, path,
header=['AA', 'X'])
def test_to_csv_from_csv3(self):
with ensure_clean('__tmp_to_csv_from_csv3__') as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode='a', header=False)
xp = pd.concat([df1, df2])
rs = pd.read_csv(path, index_col=0)
rs.columns = lmap(int, rs.columns)
xp.columns = lmap(int, xp.columns)
assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with ensure_clean('__tmp_to_csv_from_csv4__') as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = pd.DataFrame({'dt_data': [i * dt for i in range(3)]},
index=pd.Index([i * dt for i in range(3)],
name='dt_index'))
df.to_csv(path)
result = pd.read_csv(path, index_col='dt_index')
result.index = pd.to_timedelta(result.index)
# TODO: remove renaming when GH 10875 is solved
result.index = result.index.rename('dt_index')
result['dt_data'] = pd.to_timedelta(result['dt_data'])
assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self):
# tz, 8260
with ensure_clean('__tmp_to_csv_from_csv5__') as path:
self.tzframe.to_csv(path)
result = pd.read_csv(path, index_col=0, parse_dates=['A'])
converter = lambda c: to_datetime(result[c]).dt.tz_localize(
'UTC').dt.tz_convert(self.tzframe[c].dt.tz)
result['B'] = converter('B')
result['C'] = converter('C')
assert_frame_equal(result, self.tzframe)
def test_to_csv_cols_reordering(self):
# GH3454
import pandas as pd
chunksize = 5
N = int(chunksize * 2.5)
df = mkdf(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = pd.read_csv(path, index_col=0)
assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
import pandas as pd
def _check_df(df, cols=None):
with ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = pd.read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(
cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
assert_series_equal(obj_df, obj_rs)
else:
assert_frame_equal(
obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = mkdf(N, 3)
df.columns = ['a', 'a', 'b']
_check_df(df, None)
# dupe cols with selection
cols = ['b', 'a']
_check_df(df, cols)
@slow
def test_to_csv_dtnat(self):
# GH3437
from pandas import NaT
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range('2000', freq='5min', periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
        # s3 = make_dtnat_arr(chunksize + 5, 0)
with ensure_clean('1.csv') as pth:
df = DataFrame(dict(a=s1, b=s2))
df.to_csv(pth, chunksize=chunksize)
recons = DataFrame.from_csv(pth)._convert(datetime=True,
coerce=True)
assert_frame_equal(df, recons, check_names=False,
check_less_precise=True)
@slow
def test_to_csv_moar(self):
def _do_test(df, r_dtype=None, c_dtype=None,
rnlvl=None, cnlvl=None, dupe_col=False):
kwargs = dict(parse_dates=False)
if cnlvl:
if rnlvl is not None:
kwargs['index_col'] = lrange(rnlvl)
kwargs['header'] = lrange(cnlvl)
with ensure_clean('__tmp_to_csv_moar__') as path:
df.to_csv(path, encoding='utf8',
chunksize=chunksize, tupleize_cols=False)
recons = DataFrame.from_csv(
path, tupleize_cols=False, **kwargs)
else:
kwargs['header'] = 0
with ensure_clean('__tmp_to_csv_moar__') as path:
df.to_csv(path, encoding='utf8', chunksize=chunksize)
recons = DataFrame.from_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, compat.text_type):
return x.decode('utf8')
return x
if dupe_col:
                # read_csv disambiguates the columns by labeling them
                # dupe.1, dupe.2, etc.; monkey-patch the columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[
:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1:]
type_map = dict(i='i', f='f', s='O', u='O', dt='O', p='O')
if r_dtype:
if r_dtype == 'u': # unicode
r_dtype = 'O'
recons.index = np.array(lmap(_to_uni, recons.index),
dtype=r_dtype)
df.index = np.array(lmap(_to_uni, df.index), dtype=r_dtype)
elif r_dtype == 'dt': # unicode
r_dtype = 'O'
recons.index = np.array(lmap(Timestamp, recons.index),
dtype=r_dtype)
df.index = np.array(
lmap(Timestamp, df.index), dtype=r_dtype)
elif r_dtype == 'p':
r_dtype = 'O'
recons.index = np.array(
list(map(Timestamp, to_datetime(recons.index))),
dtype=r_dtype)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())),
dtype=r_dtype)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == 'u':
c_dtype = 'O'
recons.columns = np.array(lmap(_to_uni, recons.columns),
dtype=c_dtype)
df.columns = np.array(
lmap(_to_uni, df.columns), dtype=c_dtype)
elif c_dtype == 'dt':
c_dtype = 'O'
recons.columns = np.array(lmap(Timestamp, recons.columns),
dtype=c_dtype)
df.columns = np.array(
lmap(Timestamp, df.columns), dtype=c_dtype)
elif c_dtype == 'p':
c_dtype = 'O'
recons.columns = np.array(
lmap(Timestamp, to_datetime(recons.columns)),
dtype=c_dtype)
df.columns = np.array(
lmap(Timestamp, df.columns.to_timestamp()),
dtype=c_dtype)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
assert_frame_equal(df, recons, check_names=False,
check_less_precise=True)
N = 100
chunksize = 1000
for ncols in [4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type='dt',
c_idx_type='s'), 'dt', 's')
for ncols in [4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type='dt',
c_idx_type='s'), 'dt', 's')
pass
for r_idx_type, c_idx_type in [('i', 'i'), ('s', 's'), ('u', 'dt'),
('p', 'p')]:
for ncols in [1, 2, 3, 4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type=r_idx_type,
c_idx_type=c_idx_type),
r_idx_type, c_idx_type)
for ncols in [1, 2, 3, 4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = mkdf(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=lrange(10)))
_do_test(mkdf(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(mkdf(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(mkdf(nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2),
rnlvl=2, cnlvl=2)
def test_to_csv_from_csv_w_some_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['G'] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < .5]
self.frame['H'] = self.frame.index.map(f)
with ensure_clean() as path:
self.frame.to_csv(path)
recons = DataFrame.from_csv(path)
# TODO to_csv drops column name
assert_frame_equal(self.frame, recons, check_names=False)
assert_frame_equal(np.isinf(self.frame),
np.isinf(recons), check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['E'] = np.inf
self.frame['F'] = -np.inf
with ensure_clean() as path:
self.frame.to_csv(path)
recons = DataFrame.from_csv(path)
# TODO to_csv drops column name
assert_frame_equal(self.frame, recons, check_names=False)
assert_frame_equal(np.isinf(self.frame),
np.isinf(recons), check_names=False)
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with ensure_clean('__tmp_to_csv_no_index__') as path:
df = DataFrame({'c1': [1, 2, 3], 'c2': [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df, result)
df['c3'] = Series([7, 8, 9], dtype='int64')
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
        # names is passed as the columns parameter in to_csv
df = DataFrame({0: ['a', 'b', 'c'],
1: ['aa', 'bb', 'cc']})
df['test'] = 'txt'
assert df.to_csv() == df.to_csv(columns=[0, 1, 'test'])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])
with ensure_clean('__tmp_to_csv_headers__') as path:
from_df.to_csv(path, header=['X', 'Y'])
recons = DataFrame.from_csv(path)
assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=['X', 'Y'])
recons = DataFrame.from_csv(path)
recons.reset_index(inplace=True)
assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self):
frame = self.frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
frame.index = new_index
with ensure_clean('__tmp_to_csv_multiindex__') as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=['A', 'B'])
# round trip
frame.to_csv(path)
df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUP becomes a classmethod
self.frame.index = old_index
# try multiindex with dates
tsframe = self.tsframe
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=['time', 'foo'])
recons = DataFrame.from_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = DataFrame.from_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = DataFrame.from_csv(path, index_col=None)
assert_almost_equal(recons.values, self.tsframe.values)
# needed if setUP becomes classmethod
self.tsframe.index = old_index
with ensure_clean('__tmp_to_csv_multiindex__') as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ['first', 'second']
return DataFrame(np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[('bah', 'foo'),
('bah', 'bar'),
('ban', 'baz')], names=names),
dtype='int64')
# column & index are multi-index
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path, tupleize_cols=False)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
assert_frame_equal(df, result)
# column is mi
df = mkdf(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path, tupleize_cols=False)
result = read_csv(
path, header=[0, 1, 2, 3], index_col=0, tupleize_cols=False)
assert_frame_equal(df, result)
# dup column names?
df = mkdf(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path, tupleize_cols=False)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[
0, 1, 2], tupleize_cols=False)
assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, tupleize_cols=False, index=False)
result = read_csv(path, header=[0, 1], tupleize_cols=False)
assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, tupleize_cols=False, index=False)
result = read_csv(path, header=[0, 1], tupleize_cols=False)
assert all([x is None for x in result.columns.names])
result.columns.names = df.columns.names
assert_frame_equal(df, result)
# tupleize_cols=True and index=False
df = _make_frame(True)
df.to_csv(path, tupleize_cols=True, index=False)
result = read_csv(
path, header=0, tupleize_cols=True, index_col=None)
result.columns = df.columns
assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path, tupleize_cols=False)
result = read_csv(path, header=[0, 1], index_col=[
0], tupleize_cols=False)
assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path, tupleize_cols=False)
result = read_csv(path, header=[0, 1], index_col=[
0], tupleize_cols=False)
assert_frame_equal(df, result)
# column & index are multi-index (compatibility)
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path, tupleize_cols=True)
result = read_csv(path, header=0, index_col=[
0, 1], tupleize_cols=True)
result.columns = df.columns
assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path, tupleize_cols=False)
for i in [6, 7]:
msg = 'len of {i}, but only 5 lines in file'.format(i=i)
with tm.assert_raises_regex(ParserError, msg):
read_csv(path, tupleize_cols=False,
header=lrange(i), index_col=0)
# write with cols
with tm.assert_raises_regex(TypeError, 'cannot specify cols '
'with a MultiIndex'):
df.to_csv(path, tupleize_cols=False, columns=['foo', 'bar'])
with ensure_clean('__tmp_to_csv_multiindex__') as path:
# empty
tsframe[:0].to_csv(path)
recons = DataFrame.from_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(',')[2] == '999'
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})
with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:
df.to_csv(path)
df2 = DataFrame.from_csv(path)
assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return ["%s%03d" % (name, i) for i in range(5)]
df_float = DataFrame(np.random.randn(
100, 5), dtype='float64', columns=create_cols('float'))
df_int = DataFrame(np.random.randn(100, 5),
dtype='int64', columns=create_cols('int'))
df_bool = DataFrame(True, index=df_float.index,
columns=create_cols('bool'))
df_object = DataFrame('foo', index=df_float.index,
columns=create_cols('object'))
df_dt = DataFrame(Timestamp('20010101'),
index=df_float.index, columns=create_cols('date'))
# add in some nans
df_float.loc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = dict()
for n, dtype in [('float', np.float64), ('int', np.int64),
('bool', np.bool), ('object', np.object)]:
for c in create_cols(n):
dtypes[c] = dtype
with ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(filename, index_col=0, dtype=dtypes,
parse_dates=create_cols('date'))
assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(np.random.randn(1000, 30), columns=lrange(
15) + lrange(15), dtype='float64')
with ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype='float64')
df_int = DataFrame(np.random.randn(1000, 3), dtype='int64')
df_bool = DataFrame(True, index=df_float.index, columns=lrange(3))
df_object = DataFrame('foo', index=df_float.index, columns=lrange(3))
df_dt = DataFrame(Timestamp('20010101'),
index=df_float.index, columns=lrange(3))
df = pd.concat([df_float, df_int, df_bool, df_object,
df_dt], axis=1, ignore_index=True)
cols = []
for i in range(5):
cols.extend([0, 1, 2])
df.columns = cols
with ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ['0.4', '1.4', '2.4']:
result[i] = to_datetime(result[i])
result.columns = df.columns
assert_frame_equal(result, df)
# GH3457
from pandas.util.testing import makeCustomDataframe as mkdf
N = 10
df = mkdf(N, 3)
df.columns = ['a', 'a', 'b']
with ensure_clean() as filename:
df.to_csv(filename)
            # read_csv will rename the duplicate columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={'a.1': 'a'})
assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({'A': lrange(100000)})
aa['B'] = aa.A + 1.0
aa['C'] = aa.A + 2.0
aa['D'] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
assert_frame_equal(rs, aa)
@slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO('a,1.0\nb,2.0')
df = DataFrame.from_csv(f1, header=None)
newdf = DataFrame({'t': df[df.columns[0]]})
with ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
with ensure_clean() as path:
df.to_csv(path, encoding='UTF-8')
df2 = read_csv(path, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
df.to_csv(path, encoding='UTF-8', index=False)
df2 = read_csv(path, index_col=None, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO('')
df = DataFrame(
[[u("\u05d0"), "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=[u("\u05d0"),
u("\u05d1"), u("\u05d2"), u("\u05d3")],
index=[u("\u05d0"), u("\u05d1")])
df.to_csv(buf, encoding='UTF-8')
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_stringio(self):
buf = StringIO()
self.frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
# TODO to_csv drops column name
assert_frame_equal(recons, self.frame, check_names=False)
def test_to_csv_float_format(self):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, float_format='%.2f')
rs = read_csv(filename, index_col=0)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,
encoding='utf-8')
result = buf.getvalue()
expected = ('"A","B"\n'
'1,"foo"\n'
'2,"bar"\n'
'3,"baz"\n')
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({'A': ['hello', '{"hello"}']})
for encoding in (None, 'utf-8'):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE,
encoding=encoding, index=False)
result = buf.getvalue()
expected = 'A\nhello\n{"hello"}\n'
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected = ('A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, line_terminator='\r\n')
expected = (',A,B\r\n'
'one,1,4\r\n'
'two,2,5\r\n'
'three,3,6\r\n')
assert buf.getvalue() == expected
buf = StringIO()
df.to_csv(buf) # The default line terminator remains \n
expected = (',A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
assert buf.getvalue() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output as when one
# would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
res = StringIO()
s.to_csv(res)
exp = StringIO()
s2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = self.frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
def test_to_csv_compression_gzip(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="gzip")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="gzip", index_col=0)
assert_frame_equal(df, rs)
            # explicitly make sure the file is gzipped
import gzip
f = gzip.open(filename, 'rb')
text = f.read().decode('utf8')
f.close()
for col in df.columns:
assert col in text
def test_to_csv_compression_bz2(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="bz2")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="bz2", index_col=0)
assert_frame_equal(df, rs)
# explicitly make sure file is bz2ed
import bz2
f = bz2.BZ2File(filename, 'rb')
text = f.read().decode('utf8')
f.close()
for col in df.columns:
assert col in text
def test_to_csv_compression_xz(self):
# GH11852
# use the compression kw in to_csv
tm._skip_if_no_lzma()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="xz")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="xz", index_col=0)
assert_frame_equal(df, rs)
# explicitly make sure file is xzipped
lzma = compat.import_lzma()
f = lzma.open(filename, 'rb')
assert_frame_equal(df, read_csv(f, index_col=0))
f.close()
def test_to_csv_compression_value_error(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
# zip compression is not supported and should raise ValueError
import zipfile
pytest.raises(zipfile.BadZipfile, df.to_csv,
filename, compression="zip")
def test_to_csv_date_format(self):
with ensure_clean('__tmp_to_csv_date_format__') as path:
dt_index = self.tsframe.index
datetime_frame = DataFrame(
{'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)
datetime_frame.to_csv(path, date_format='%Y%m%d')
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(
lambda x: int(x.strftime('%Y%m%d')))
datetime_frame_int.index = datetime_frame_int.index.map(
lambda x: int(x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format='%Y-%m-%d')
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(
lambda x: x.strftime('%Y-%m-%d'))
datetime_frame_str.index = datetime_frame_str.index.map(
lambda x: x.strftime('%Y-%m-%d'))
assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format='%Y%m%d')
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(
lambda x: int(x.strftime('%Y%m%d')))
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = (
datetime_frame_columns.columns
.map(lambda x: x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(
['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])
nat_frame = DataFrame({'A': nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format='%Y-%m-%d')
test = read_csv(path, parse_dates=[0, 1], index_col=0)
assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with ensure_clean('csv_date_format_with_dst') as path:
# make sure we are not failing on transitions
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10s')]:
time_range = np.array(range(len(i)), dtype='int64')
df = DataFrame({'A': time_range}, index=i)
df.to_csv(path, index=True)
# we have to reconvert the index as we
# don't parse the tz's
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index).tz_localize(
'UTC').tz_convert('Europe/London')
assert_frame_equal(result, df)
# GH11619
idx = pd.date_range('2015-01-01', '2015-12-31',
freq='H', tz='Europe/Paris')
df = DataFrame({'values': 1, 'idx': idx},
index=idx)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_csv(path, index=True)
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index).tz_localize(
'UTC').tz_convert('Europe/Paris')
result['idx'] = to_datetime(result['idx']).astype(
'datetime64[ns, Europe/Paris]')
assert_frame_equal(result, df)
# assert working
df.astype(str)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_pickle(path)
result = pd.read_pickle(path)
assert_frame_equal(result, df)
def test_to_csv_quoting(self):
df = DataFrame({
'c_string': ['a', 'b,c'],
'c_int': [42, np.nan],
'c_float': [1.0, 3.2],
'c_bool': [True, False],
})
expected = """\
,c_bool,c_float,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,"b,c"
"""
result = df.to_csv()
assert result == expected
result = df.to_csv(quoting=None)
assert result == expected
result = df.to_csv(quoting=csv.QUOTE_MINIMAL)
assert result == expected
expected = """\
"","c_bool","c_float","c_int","c_string"
"0","True","1.0","42.0","a"
"1","False","3.2","","b,c"
"""
result = df.to_csv(quoting=csv.QUOTE_ALL)
assert result == expected
# see gh-12922, gh-13259: make sure changes to
# the formatters do not break this behaviour
expected = """\
"","c_bool","c_float","c_int","c_string"
0,True,1.0,42.0,"a"
1,False,3.2,"","b,c"
"""
result = df.to_csv(quoting=csv.QUOTE_NONNUMERIC)
assert result == expected
msg = "need to escape, but no escapechar set"
tm.assert_raises_regex(csv.Error, msg, df.to_csv,
quoting=csv.QUOTE_NONE)
tm.assert_raises_regex(csv.Error, msg, df.to_csv,
quoting=csv.QUOTE_NONE,
escapechar=None)
expected = """\
,c_bool,c_float,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,b!,c
"""
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='!')
assert result == expected
expected = """\
,c_bool,c_ffloat,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,bf,c
"""
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='f')
assert result == expected
# see gh-3503: quoting Windows line terminators
# presents with encoding?
text = 'a,b,c\n1,"test \r\n",3\n'
df = pd.read_csv(StringIO(text))
buf = StringIO()
df.to_csv(buf, encoding='utf-8', index=False)
assert buf.getvalue() == text
# xref gh-7791: make sure the quoting parameter is passed through
# with multi-indexes
df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
df = df.set_index(['a', 'b'])
expected = '"a","b","c"\n"1","3","5"\n"2","4","6"\n'
assert df.to_csv(quoting=csv.QUOTE_ALL) == expected
def test_period_index_date_overflow(self):
# see gh-15982
dates = ["1990-01-01", "2000-01-01", "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected = ',0\n1990-01-01,4\n2000-01-01,5\n3005-01-01,6\n'
assert result == expected
date_format = "%m-%d-%Y"
result = df.to_csv(date_format=date_format)
expected = ',0\n01-01-1990,4\n01-01-2000,5\n01-01-3005,6\n'
assert result == expected
# Overflow with pd.NaT
dates = ["1990-01-01", pd.NaT, "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n'
assert result == expected
| mit |
jerabaul29/MaterialForMEK3230 | FlyingSeed_codeExperiment/select_valid_range.py | 1 | 3755 | from scipy import misc
import os
import fnmatch
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from scipy import signal
from tqdm import tqdm
import pickle
# %matplotlib inline
#################################################################################
# all functions ----------------------------------------------------------------
class generateDataOnClick:
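    """Accumulate the axes coordinates of left mouse clicks on a figure."""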
def __init__(self, verbose=0):
self.position_on_click_accumulator = []
self.verbose = verbose
def position_on_click(self, event):
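        # Record left clicks made inside the axes and mark each one with a
        # vertical line so the selected range is visible on the plot.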
x, y = event.x, event.y
if event.button == 1:
if event.inaxes is not None:
if self.verbose > 0:
print 'data coords:' + str(event.xdata) + " , " + str(event.ydata)
self.position_on_click_accumulator.append((event.xdata, event.ydata))
plt.axvline(event.xdata, color='r')
plt.show()
def return_positions(self):
return self.position_on_click_accumulator
def select_valid_range(verbose=0):
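    """Plot the tracked seed position for the current case and let the user
    click twice on the figure to delimit the frame range to analyse; the two
    selected frame indices are saved to valid_range.csv in the case folder.
    Relies on the module-level globals list_pos_seed, path, list_cases and
    ind_case.
    """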
pos_seed = np.asarray(list_pos_seed)
plt.figure()
plt.plot(pos_seed[:, 0], label='x position')
plt.plot(pos_seed[:, 1], label='y position')
plt.xlabel('Frame number')
plt.ylabel('Position seed (pxl)')
plt.legend(loc=2)
generate_data_on_click_object = generateDataOnClick()
plt.connect('button_press_event', generate_data_on_click_object.position_on_click)
plt.show()
selected_positions_pixels = generate_data_on_click_object.return_positions()
x_position_1 = int(np.floor(selected_positions_pixels[0][0]))
x_position_2 = int(np.floor(selected_positions_pixels[1][0]))
data_valid_range = np.array([x_position_1, x_position_2])
# save the valid range
np.savetxt(path + list_cases[ind_case] + '/' + "valid_range.csv", data_valid_range, delimiter=",")
def save_one_result(result_data, result_name):
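    # Pickle a result object into the current case directory.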
with open(path + list_cases[ind_case] + '/' + result_name + '.pkl', 'w') as crrt_file:
pickle.dump(result_data, crrt_file, pickle.HIGHEST_PROTOCOL)
def load_one_result(result_name):
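    # Load a previously pickled result from the current case directory.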
with open(path + list_cases[ind_case] + '/' + result_name + '.pkl', 'r') as crrt_file:
result_data = pickle.load(crrt_file)
return result_data
#################################################################################
# analysis of the data
path = '/media/hydroubuntu/Seagate Expansion Drive/data_lab_module_07122016/data_seed/'
sampling_frequency = 30
# loads the calibration --------------------------------------------------------
poly_fit_calibration = np.load(path + 'poly_fit_calibration.npy')
# load list of all cases -------------------------------------------------------
list_cases = []
for file_name in os.listdir(path):
if fnmatch.fnmatch(file_name, 'Test_*'):
list_cases.append(file_name)
print "Cases to process:"
for crrt_case in list_cases:
print crrt_case
print " "
nbr_cases = len(list_cases)
print "Number of cases: " + str(nbr_cases)
# select range on all cases ----------------------------------------------------
for ind_case in range(nbr_cases):
print ""
print "------------------------------------------------------------"
print "Analysing case: " + str(list_cases[ind_case])
path_to_images = path + list_cases[ind_case] + '/'
print "Load generated data"
list_pos_seed = load_one_result('list_pos_seed')
list_width_data_seed = load_one_result('list_width_data_seed')
list_true_wing_tip = load_one_result('list_true_wing_tip')
print ""
print "Select valid range"
print "Click on the figure to select the range to use for later analysis"
print "then close the figure."
select_valid_range()
| mit |
mph-/lcapy | lcapy/noiseomegaexpr.py | 1 | 5062 | """This module provides the AngularFourierNoiseDomainExpression class to represent
omega-domain (angular Fourier domain) noise expressions.
Copyright 2014--2020 Michael Hayes, UCECE
"""
from __future__ import division
from .sym import symsimplify
from .functions import sqrt
from .sym import pi, omegasym, fsym
from .state import state
from .domains import AngularFourierNoiseDomain
from .expr import expr
from .noiseexpr import NoiseExpression
from .fexpr import f, FourierDomainExpression
from .omegaexpr import AngularFourierDomainExpression
import sympy as sym
import numpy as np
class AngularFourierNoiseDomainExpression(AngularFourierNoiseDomain, NoiseExpression):
"""Angular frequency domain (one-sided) noise spectrum expression (amplitude
spectral density).
This characterises a wide-sense stationary, zero-mean Gaussian
noise random process.
When performing arithmetic on two AngularFourierNoiseDomainExpression
expressions it is assumed that they are uncorrelated unless they have
    the same nid (noise identifier). If the nid is not specified, a new one is created.
Uncorrelated noise expressions are added in quadrature (on a power
basis). Thus (AngularFourierNoiseDomainExpression(3) +
AngularFourierNoiseDomainExpression(4)).expr = 5 since 5 =
sqrt(3**2 + 4**2)
AngularFourierNoiseDomainExpression(3) !=
AngularFourierNoiseDomainExpression(3) since they are different
noise realisations albeit with the same properties. However,
AngularFourierNoiseDomainExpression(3).expr ==
AngularFourierNoiseDomainExpression(3).expr. Similarly,
AngularFourierNoiseDomainExpression(3, nid='n1') ==
AngularFourierNoiseDomainExpression(3, nid='n1') since they have
the same noise identifier and thus have the same realisation.
Caution: The sum of two noise expressions generates a noise
expression with a new nid. This can lead to unexpected results
since noise expressions with different nids are assumed to be
uncorrelated. For example, consider:
a = AngularFourierNoiseDomainExpression(3);
b = AngularFourierNoiseDomainExpression(4)
a + b - b gives sqrt(41) and a + b - a gives sqrt(34).
This case is correctly handled by the SuperpositionVoltage and
SuperpositionCurrent classes since each noise component is stored
and considered separately.
(SuperpositionVoltage(a) + SuperpositionVoltage(b) - SuperpositionVoltage(b)).n gives 3 as expected.
"""
var = omegasym
def plot(self, omegavector=None, **kwargs):
"""Plot frequency response at values specified by omegavector.
There are many plotting options, see matplotlib.pyplot.plot.
For example:
V.plot(omegavector, log_frequency=True)
V.real.plot(omegavector, color='black')
V.phase.plot(omegavector, color='black', linestyle='--')
By default complex data is plotted as separate plots of magnitude (dB)
and phase.
"""
from .plot import plot_angular_frequency
return plot_angular_frequency(self, omegavector, **kwargs)
def transform(self, arg, **assumptions):
"""Transform into a different domain."""
arg = expr(arg)
if isinstance(arg, FourierDomainExpression):
result = self.subs(f * 2 * pi)
cls = self._class_by_quantity(self.quantity, 'fourier noise')
return cls(result, nid=self.nid, **assumptions)
elif isinstance(arg, AngularFourierDomainExpression):
result = self.subs(arg, **assumptions)
cls = self._class_by_quantity(self.quantity, 'angular fourier noise')
return cls(result, nid=self.nid, **assumptions)
return super(AngularFourierNoiseDomainExpression, self).transform(arg, **assumptions)
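# Illustrative sketch (added here for clarity, not part of lcapy): the numbers
# behind the docstring examples above.  Uncorrelated amplitude spectral
# densities combine on a power basis, so 3 and 4 give 5, and the caution about
# fresh nids follows from the same rule.
def _example_quadrature_addition():
    from math import sqrt
    a_amp, b_amp = 3.0, 4.0
    combined = sqrt(a_amp**2 + b_amp**2)     # 5 = sqrt(3**2 + 4**2)
    minus_b = sqrt(combined**2 + b_amp**2)   # (a + b) - b  ->  sqrt(41)
    minus_a = sqrt(combined**2 + a_amp**2)   # (a + b) - a  ->  sqrt(34)
    return combined, minus_b, minus_a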
class AngularFourierNoiseDomainVoltage(AngularFourierNoiseDomainExpression):
"""Voltage noise amplitude spectral density (units V/rtrad/s).
This can be a function of angular frequency, omega. For example,
to model an opamp voltage noise:
v = AngularFourierNoiseDomainVoltage(1e-8 / sqrt(omega) + 8e-9)
"""
quantity_label = 'Voltage noise spectral density'
units = 'V/rtrad/s'
class AngularFourierNoiseDomainCurrent(AngularFourierNoiseDomainExpression):
"""Current noise amplitude spectral density (units A/rtrad/s).
This can be a function of angular frequency, omega. For example,
to model an opamp current noise:
i = AngularFourierNoiseDomainCurrent(3e-12 / sqrt(omega) + 200e-15)
"""
quantity_label = 'Current noise spectral density'
units = 'A/rtrad/s'
from .expressionclasses import expressionclasses
classes = expressionclasses.register('angular fourier noise', AngularFourierNoiseDomainExpression, None,
('voltage', 'current'))
AngularFourierNoiseDomainVoltage = classes['voltage']
AngularFourierNoiseDomainCurrent = classes['current']
from .omegaexpr import omega
from .noisefexpr import FourierNoiseDomainExpression
| lgpl-2.1 |
stephenhelms/WormTracker | python/tsstats.py | 1 | 8100 | import numpy as np
import numpy.ma as ma
from numpy import linalg as LA
import matplotlib.pyplot as plt
import itertools
import collections
from scipy import stats
def acf(x, lags=500, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
# from stackexchange
x = x - x.mean() # remove mean
if type(lags) is int:
lags = range(lags)
C = ma.zeros((len(lags),))
sigma2 = x.var()
for i, l in enumerate(lags):
if l == 0:
C[i] = 1
elif l >= x.shape[0]:
C[i] = ma.masked
else:
x0 = x[:-l].copy()
x1 = x[l:].copy()
reject = (exclude[l:]-exclude[:-l])>0
x0[reject] = ma.masked
x1[reject] = ma.masked
C[i] = (x0*x1).mean()/sigma2
return C
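# Usage sketch (illustration only, not part of the original module): calling
# acf above on a masked series.  Entries of `exclude` set to 1 mark frames
# whose gaps should not be bridged when forming lagged products.
def _example_acf_usage():
    x = ma.masked_invalid(np.sin(np.linspace(0, 20 * np.pi, 2000)))
    exclude = np.zeros(x.shape)   # e.g. set to 1 where tracking was lost
    return acf(x, lags=50, exclude=exclude)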
def ccf(x, y, lags, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
x = x - x.mean() # remove mean
y = y - y.mean()
if type(lags) is int:
lags = np.arange(-lags,lags)
C = ma.zeros((len(lags),))
sigma2 = x.std()*y.std()
for i, l in enumerate(lags):
if l == 0:
C[i] = (x*y).mean()/sigma2
else:
if l > 0:
x0 = x[:-l].copy()
y1 = y[l:].copy()
else:
x0 = y[:l].copy()
y1 = x[-l:].copy()
reject = (exclude[l:]-exclude[:-l])>0
x0[reject] = ma.masked
y1[reject] = ma.masked
C[i] = (x0*y1).mean()/sigma2
return C
def acv(k, List):
'''
Autocovariance
k is the lag order
'''
y = List.copy()
y = y - y.mean()
if k == 0:
return (y*y).mean()
else:
return (y[:-k]*y[k:]).mean()
def dotacf(x, lags=500, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
if type(lags) is int:
lags = xrange(lags)
C = ma.zeros((len(lags),))
for i, l in enumerate(lags):
if l == 0:
C[i] = (x*x).sum(axis=1).mean()
else:
x0 = x[:-l, :].copy()
x1 = x[l:, :].copy()
reject = (exclude[l:]-exclude[:-l])>0
x0[reject, :] = ma.masked
x1[reject, :] = ma.masked
C[i] = (x0*x1).sum(axis=1).mean()
return C
def pacfe(p,j,List):
'''
Partial autocorrelation function estimates
p is the order of the AR(p) process
j is the coefficient in an AR(p) process
'''
if p==2 and j==1:
return (acf(j,List)*(1-acf(p,List)))/(1-(acf(j,List))**2)
elif p==2 and j==2:
return (acf(2,List)-(acf(1,List))**2)/(1-(acf(1,List))**2)
elif p==j and p!=2 and j!=2:
c=0
for a in range(1,p):
c+=pacfe(p-1,a,List)*acf(p-a,List)
d=0
for b in range(1,p):
d+=pacfe(p-1,b,List)*acf(b,List)
return (acf(p,List)-c)/(1-d)
else:
return pacfe(p-1,j,List)-pacfe(p,p,List)*pacfe(p-1,p-j,List)
def drift(x, lags=500, exclude=None):
if exclude is None:
exclude = np.zeros(x.shape)
exclude = np.cumsum(exclude.astype(int))
if type(lags) is int:
lags = xrange(lags)
mu = ma.zeros((len(lags),))
for i, lag in enumerate(lags):
if lag==0:
mu[i] = 0
elif lag >= x.shape[0]:
mu[i] = ma.masked
else:
x0 = x[lag:].copy()
x1 = x[:-lag].copy()
reject = (exclude[lag:]-exclude[:-lag])>0
x0[reject] = ma.masked
x1[reject] = ma.masked
displacements = x0 - x1
mu[i] = displacements.mean()
return mu
def unwrapma(x):
# Adapted from numpy unwrap, this version ignores missing data
idx = ma.array(np.arange(0,x.shape[0]), mask=x.mask)
idxc = idx.compressed()
xc = x.compressed()
dd = np.diff(xc)
ddmod = np.mod(dd+np.pi, 2*np.pi)-np.pi
ddmod[(ddmod==-np.pi) & (dd > 0)] = np.pi
phc_correct = ddmod - dd
phc_correct[np.abs(dd)<np.pi] = 0
ph_correct = np.zeros(x.shape)
ph_correct[idxc[1:]] = phc_correct
up = x + ph_correct.cumsum()
return up
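# Usage sketch (illustration only, not part of the original module): unwrapping
# a wrapped phase series that has a masked (missing) stretch in the middle.
def _example_unwrapma_usage():
    phase = np.mod(np.linspace(0, 6 * np.pi, 500), 2 * np.pi) - np.pi
    phase[100:110] = np.nan             # pretend these frames are missing
    return unwrapma(ma.masked_invalid(phase))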
def nextpow2(n):
'''
Returns the next highest power of 2 from n
'''
m_f = np.log2(n)
m_i = np.ceil(m_f)
return 2**m_i
def phaserand(X, independent=False, reduceHighFreqNoise=True):
'''
Generates a randomized surrogate dataset for X, preserving linear temporal
correlations. If independent is False (default), linear correlations
between columns of x are also preserved.
If X contains missing values, they are filled with the mean of that
channel.
The algorithm works by randomizing the phases in the Fourier domain. For
non-independent shuffling, the same random phases are used for each
channel.
References:
Theiler, J., Eubank, S., Longtin, A., Galdrikian, B., & Doyne Farmer, J.
(1992). Testing for nonlinearity in time series: the method of
surrogate data. Physica D: Nonlinear Phenomena, 58(1), 77-94.
Prichard, D. and Theiler, J. (1994) Generating surrogate data for time
series with several simultaneously measured variables. Phys. Rev.
Lett. 73(7), 951-954.
Podobnik, B., Fu, D. F., Stanley, H. E., & Ivanov, P. C. (2007).
Power-law autocorrelated stochastic processes with long-range
cross-correlations. The European Physical Journal B, 56(1), 47-52.
'''
# Deal with array vs matrix by adding new axis
if len(X.shape) == 1:
X = X[:, np.newaxis]
# Deal with missing data
if isinstance(X, ma.MaskedArray):
# truncate all missing data at beginning and end
idxNotAllMissing = (~np.all(X.mask, axis=1)).nonzero()[0]
X = X[idxNotAllMissing[0]:idxNotAllMissing[-1], :]
X = X.filled(X.mean(axis=0)) # fill interior mask with the mean
# Reduce high-frequency noise by min difference between first and last
if reduceHighFreqNoise:
delta = X - X[0, :]
threshold = 1e-3*np.std(X, axis=0)
# find last pt in which all the channels are about the same as the beginning
# and also the index is even
goodEndPt = np.nonzero((np.all(np.abs(delta) < threshold, axis=1)) &
                               (np.arange(0, X.shape[0]) % 2 == 0))[0][-1]
if goodEndPt > X.shape[0]/2: # make sure we keep at least half the data
X = X[:goodEndPt, :]
# Fourier transform and extract amplitude and phases
# The frequencies are shifted so 0 is centered (fftshift)
N = X.shape[0] #int(nextpow2(X.shape[0])) # size for FFT
if N % 2 != 0:
N = N-1
    h = int(N // 2)  # half the length of the data (integer, used for slicing)
Z = np.fft.fft(X, N, axis=0)
M = np.fft.fftshift(np.abs(Z), axes=0) # the amplitudes
phase = np.fft.fftshift(np.angle(Z), axes=0) # the original phases
# Randomize the phases. The phases need to be symmetric for postivie and
# negative frequencies.
if independent: # generate random phases for each channel
        randphase = 2.*np.pi*np.random.rand(h-1, X.shape[1])  # random phases
newphase = np.zeros((N, X.shape[1])) # new phases to use
newphase[0, :] = phase[0, :] # keep the zero freq (don't know why)
newphase[1:h, :] = randphase[::-1, :]
newphase[h, :] = phase[h, :]
newphase[h+1:, :] = -randphase
else: # generate one set of random phases (same as above)
randphase = 2.*np.pi*np.random.rand(h-1)
newphase = np.zeros((N, X.shape[1]))
newphase[0, :] = phase[0, :]
newphase[1:h, :] = randphase[::-1, np.newaxis]
newphase[h, :] = phase[h, :]
newphase[h+1:, :] = -randphase[:, np.newaxis]
# Reconstruct the signal from the original amplitude and the new phases
z2 = M*np.exp(newphase*1.j)
# Return the time-domain signal
return np.fft.ifft(np.fft.ifftshift(z2, axes=0),
axis=0).real.squeeze()
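# Usage sketch (illustration only, not part of the original module): one
# phase-randomized surrogate of a two-channel signal.  With independent=False
# the same random phases are applied to both channels, preserving their linear
# cross-correlation as described in the docstring above.
def _example_phaserand_usage():
    t = np.linspace(0, 10, 1024)
    X = np.column_stack([np.sin(2 * np.pi * t), np.cos(2 * np.pi * t)])
    return phaserand(X, independent=False)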
| apache-2.0 |
cauchycui/scikit-learn | sklearn/tests/test_multiclass.py | 72 | 24581 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels because there are only 3 distinct class pairs and thus 3
        # distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
DaveBackus/Data_Bootcamp | Code/SQL/SQL_support_code.py | 2 | 14108 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pandas as pd
import sqlite3
import xlrd as xl
# if this .sqlite db doesn't already exists, this will create it
# if the .sqlite db *does* already exist, this establishes the desired connection
con = sqlite3.connect("sql_sample_db.sqlite")
# create pandas dataframes from each .csv file:
sales_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/sales_table.csv')
car_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/car_table.csv')
salesman_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/salesman_table.csv')
cust_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/cust_table.csv')
dog_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/dog_table.csv')
cat_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/cat_table.csv')
#%%
# make a list of the tables (dataframes) and table names:
tables = [sales_table, car_table, salesman_table, cust_table, dog_table, cat_table]
table_names = ['sales_table', 'car_table', 'salesman_table', 'cust_table', 'dog_table', 'cat_table']
# drop each table name if it already exists to avoid error if you rerun this bit of code
# then add it back (or add it for the first time, if the table didn't already exist)
for i in range(len(tables)):
table_name = table_names[i]
table = tables[i]
con.execute("DROP TABLE IF EXISTS {}".format(table_name))
pd.io.sql.to_sql(table, "{}".format(table_name), con, index=False)
# Function to make it easy to run queries on this mini-database
def run(query):
results = pd.read_sql("{}".format(query), con).fillna(' ')
return results
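# Example use of the helper above (illustration only; the query text is just a
# sample against the tables created earlier in this script):
def example_run_usage():
    return run("SELECT * FROM car_table LIMIT 5")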
# create some dataframes to act as keys to clarify differences between difference rdbms
rdbms_differences = pd.DataFrame()
# show describe options
describe_index = ['Reading a table']
describe_differences = pd.DataFrame({'SQLite' : pd.Series(['PRAGMA TABLE_INFO(table_name)'], index=describe_index),
'MySQL' : pd.Series(['DESCRIBE table_name'], index=describe_index),
'Microsoft SQL Server' : pd.Series(['SP_HELP table_name'], index=describe_index),
                                     'Oracle' : pd.Series(['DESCRIBE table_name'], index=describe_index)})
rdbms_differences = rdbms_differences.append(describe_differences)
# show limit options
limit_df_index = ['LIMITING']
limit_differences = pd.DataFrame({'SQLite' : pd.Series(['LIMIT N'], index=limit_df_index),
'MySQL' : pd.Series(['LIMIT N'], index=limit_df_index),
'Microsoft SQL Server' : pd.Series(['SELECT TOP N column_a...'], index=limit_df_index),
'Oracle' : pd.Series(['WHERE ROWNUM <=N'], index=limit_df_index)})
rdbms_differences = rdbms_differences.append(limit_differences)
# show compatibility with joins and different DBs
join_df_index = ['JOIN or INNER JOIN', 'LEFT JOIN or LEFT OUTER JOIN', 'RIGHT JOIN or RIGHT OUTER JOIN', 'OUTER JOIN or FULL OUTER JOIN']
join_differences = pd.DataFrame({'SQLite' : pd.Series(['✓', '✓', 'not supported', 'not supported'], index=join_df_index),
'MySQL' : pd.Series(['✓', '✓', '✓', 'not supported'], index=join_df_index),
'Microsoft SQL Server' : pd.Series(['✓','✓','✓','✓'], index=join_df_index),
'Oracle' : pd.Series(['✓','✓','✓','✓'], index=join_df_index)})
rdbms_differences = rdbms_differences.append(join_differences)
# show concat options:
concat_df_index = ['Concatenating']
concat_differences = pd.DataFrame({'SQLite' : pd.Series(['||'], index=concat_df_index),
'MySQL' : pd.Series(['CONCAT(column_a, column_b)'], index=concat_df_index),
'Microsoft SQL Server' : pd.Series(['CONCAT(column_a, column_b) or +'], index=concat_df_index),
'Oracle' : pd.Series(['CONCAT(column_a, column_b) or ||'], index=concat_df_index)})
rdbms_differences = rdbms_differences.append(concat_differences)
# show options for IF and CASE WHEN statements
conditional_df_index = ['IF', 'CASE WHEN']
conditional_differences = pd.DataFrame({'SQLite' : pd.Series(['not supported', '✓'], index=conditional_df_index),
'MySQL' : pd.Series(['IF(condition, value_if_true, value_if_false)', '✓'], index=conditional_df_index),
'Microsoft SQL Server' : pd.Series(['IF condition PRINT value_if_true...','✓'], index=conditional_df_index),
'Oracle' : pd.Series(['IF condition THEN value_if_true ELSIF...END IF','✓'], index=conditional_df_index)})
rdbms_differences = rdbms_differences.append(conditional_differences)
rollup_df_index = ['ROLLUP']
rollup_differences = pd.DataFrame({'SQLite' : pd.Series(['not supported'], index=rollup_df_index),
'MySQL' : pd.Series(['GROUP BY column_a WITH ROLLUP'], index=rollup_df_index),
'Microsoft SQL Server' : pd.Series(['GROUP BY column_a WITH ROLLUP'], index=rollup_df_index),
'Oracle' : pd.Series(['GROUP BY ROLLUP (column_a)'], index=rollup_df_index)})
rdbms_differences = rdbms_differences.append(rollup_differences)
# Below are all the cheats to the challenges
describe_cheat = '''PRAGMA TABLE_INFO(car_table)'''
select_cheat1 = '''
SELECT
*
FROM
car_table
'''
select_cheat2 = '''
SELECT
model_id,
model
FROM
car_table
'''
select_cheat3 = '''
SELECT
DISTINCT salesman_id
FROM
sales_table
'''
where_cheat1 = '''
SELECT
*
FROM
sales_table
WHERE
payment_type != 'cash'
AND model_id IN (31,36)
'''
where_cheat2 = '''
SELECT
*
FROM
sales_table
WHERE
revenue BETWEEN 24000 AND 25000
'''
where_cheat3 = '''
SELECT
*
FROM
car_table
WHERE
model LIKE 'out%'
'''
order_cheat = '''
SELECT
*
FROM
car_table
ORDER BY
sticker_price DESC
'''
alias_cheat = '''
SELECT
model_id AS Model,
revenue AS Rev
FROM
sales_table
WHERE
Model = 36
ORDER BY
Rev DESC
'''
join_cheat1 = '''
SELECT
*
FROM
sales_table
JOIN cust_table on sales_table.customer_id = cust_table.customer_id
'''
join_cheat2 = '''
SELECT
gender,
revenue
FROM
sales_table
JOIN cust_table on sales_table.customer_id = cust_table.customer_id
'''
join_cheat3 = '''
SELECT
sales_table.customer_id,
gender,
revenue
FROM
sales_table
JOIN cust_table on sales_table.customer_id = cust_table.customer_id
**OR**
SELECT
cust_table.customer_id,
gender,
revenue
FROM
sales_table
JOIN cust_table on sales_table.customer_id = cust_table.customer_id
'''
join_cheat4 = '''
SELECT
*
FROM
sales_table
JOIN salesman_table ON sales_table.salesman_id = salesman_table.id
'''
join_cheat5 = '''
SELECT
S.id AS transaction_id,
S.salesman_id as sales_table_salesman_id,
SM.id as salesman_table_id
FROM
sales_table AS S
JOIN salesman_table AS SM ON S.salesman_id = SM.id
'''
inner_join_cheat = '''
SELECT
*
FROM
Dog_Table D
JOIN Cat_Table C ON D.Owner_Name = C.Owner_Name
-- OR, to make it cleaner --
SELECT
D.Owner_Name, Dog_Name, Cat_Name
FROM
Dog_Table D
JOIN Cat_Table C ON D.Owner_Name = C.Owner_Name
'''
left_join_cheat = '''
SELECT
*
FROM
Dog_Table D
LEFT JOIN Cat_Table C ON D.Owner_Name = C.Owner_Name
-- OR, to make it cleaner --
SELECT
C.Owner_Name, Cat_Name, Dog_Name
FROM
Dog_Table D
LEFT JOIN Cat_Table C ON D.Owner_Name = C.Owner_Name
'''
operator_cheat = '''
SELECT
S.id,
C.model,
S.revenue,
C.cogs,
(S.revenue - C.cogs)/(C.cogs) AS gross_profit
FROM
sales_table S
JOIN car_table C on S.model_id = C.model_id
LIMIT 5
'''
concat_cheat = '''
SELECT
    model || ' (' || make || ')' AS 'Model (Make)'
FROM
car_table
'''
avg_cheat = '''
SELECT
ROUND(AVG(cogs), 2) AS AVG_COGS
FROM
car_table
'''
count_cheat = '''
SELECT
COUNT(*) cars
FROM
car_table
--bonus points (run separately)--
SELECT
COUNT(*) Subarus
FROM
car_table
WHERE
make = 'Subaru'
'''
avg_cheat2 = '''
SELECT
AVG(C.sticker_price - S.revenue) AS Avg_Difference
FROM
sales_table S
JOIN car_table C on C.model_id = S.model_id
JOIN cust_table CUST on CUST.customer_id = S.customer_id
WHERE
CUST.age > 35
'''
concat_cheat = '''
SELECT
GROUP_CONCAT(last_name, ', ') AS Last_Names
FROM
salesman_table
'''
group_cheat = '''
SELECT
C.make,
AVG(S.revenue - C.cogs)
FROM
sales_table S
JOIN car_table C on S.model_id = C.model_id
GROUP BY
C.make
--bonus points--
SELECT
C.make as Car_Maker,
ROUND(AVG(S.revenue - C.cogs), 2) AS Avg_Gross_Profit
FROM
sales_table S
JOIN car_table C on S.model_id = C.model_id
JOIN salesman_table SM on S.salesman_id = SM.id
WHERE
SM.first_name = 'Michael'
GROUP BY
Car_Maker
ORDER BY
Avg_Gross_Profit DESC
--solutions to WHERE problem--
SELECT
SM.first_name as Salesman,
C.make as Car_Maker,
ROUND(AVG(S.revenue - C.cogs), 2) AS Avg_Gross_Profit
FROM
sales_table S
JOIN car_table C on S.model_id = C.model_id
JOIN salesman_table SM on S.salesman_id = SM.id
WHERE
SM.first_name = 'Michael'
GROUP BY
Car_Maker
ORDER BY
Avg_Gross_Profit DESC
'''
group_cheat1 = '''
SELECT
make as Maker,
GROUP_CONCAT(model) as Car_Models
FROM
car_table
GROUP BY
Maker
'''
group_cheat2 = '''
SELECT
SM.first_name || ' ' || SM.last_name AS Salesperson,
CUST.gender AS Gender,
SUM(S.revenue) AS Total_Revenue,
COUNT(S.id) AS Cars_Sold
FROM
sales_table S
JOIN salesman_table SM ON S.salesman_id = SM.id
JOIN cust_table CUST ON S.customer_id = CUST.customer_id
GROUP BY
Salesperson,
Gender
'''
having_cheat = '''
SELECT
C.model as Car_Model,
AVG(S.revenue) as Avg_Revenue
FROM
sales_table S
JOIN car_table C on S.model_id = C.model_id
GROUP BY
Car_Model HAVING Avg_Revenue < 18000
'''
having_where_cheat = '''
SELECT
SM.last_name as Salesperson,
ROUND(AVG(S.revenue), 2) as Avg_Revenue
FROM
sales_table S
JOIN salesman_table SM ON S.salesman_id = SM.id
JOIN cust_table CUST ON S.customer_id = CUST.customer_id
WHERE
CUST.gender = 'female'
GROUP BY
Salesperson HAVING Avg_Revenue > 20000
'''
case_cheat = '''
SELECT
C.model as Car_Model,
SUM(CASE WHEN CUST.gender = 'female' THEN S.revenue END) Female_Customer_Revenue,
SUM(CASE WHEN CUST.gender = 'male' THEN S.revenue END) Male_Customer_Revenue
FROM
sales_table S
JOIN car_table C ON S.model_id = C.model_id
JOIN cust_table CUST ON CUST.customer_id = S.customer_id
GROUP BY
Car_Model
'''
case_cheat2 = '''
SELECT
CASE WHEN age BETWEEN 18 AND 24 THEN '18-24 years'
WHEN age BETWEEN 25 AND 34 THEN '25-34 years'
         WHEN age BETWEEN 35 AND 44 THEN '35-44 years'
WHEN age BETWEEN 45 AND 54 THEN '45-54 years'
WHEN age BETWEEN 55 AND 64 THEN '55-64 years'
END Age_Group,
SUM(CASE WHEN gender = 'female' THEN 1 END) Female_Customers,
SUM(CASE WHEN gender = 'male' THEN 1 END) Male_Customers
FROM
cust_table
GROUP BY
Age_Group
--bonus points--
SELECT
CASE WHEN age BETWEEN 18 AND 24 THEN '18-24 years'
WHEN age BETWEEN 25 AND 34 THEN '25-34 years'
         WHEN age BETWEEN 35 AND 44 THEN '35-44 years'
WHEN age BETWEEN 45 AND 54 THEN '45-54 years'
WHEN age BETWEEN 55 AND 64 THEN '55-64 years'
END Age_Group,
ROUND(SUM(CASE WHEN gender = 'female' THEN 1. END)/COUNT(*), 2) Female_Customers,
ROUND(SUM(CASE WHEN gender = 'male' THEN 1. END)/COUNT(*), 2) Male_Customers
FROM
cust_table
GROUP BY
Age_Group
'''
nest_cheat1 = '''
SELECT
model AS Car_Model,
cogs AS COGs,
(SELECT AVG(cogs) from car_table) AS Average_COGs
FROM
car_table
--bonus points--
SELECT
model AS Car_Model,
cogs AS COGs,
(SELECT AVG(cogs) from car_table) AS Average_COGs,
cogs - (SELECT AVG(cogs) from car_table) AS Difference
FROM
car_table
'''
nest_cheat2 = '''
SELECT
SUM(revenue) as Female_Revenue
FROM
sales_table
WHERE
customer_id in (SELECT customer_id FROM cust_table WHERE gender = 'female')
'''
union_cheat1 = '''
SELECT
model,
cogs
FROM
car_table
UNION ALL
SELECT
'Combined_Average',
ROUND(AVG(cogs), 2)
FROM
car_table
''' | mit |
mclevey/seaborn | seaborn/tests/test_axisgrid.py | 11 | 42805 | import warnings
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
import pandas.util.testing as tm
from .. import axisgrid as ag
from .. import rcmod
from ..palettes import color_palette
from ..distributions import kdeplot
from ..categorical import pointplot
from ..linearmodels import pairplot
from ..utils import categorical_order
rs = np.random.RandomState(0)
old_matplotlib = LooseVersion(mpl.__version__) < "1.4"
class TestFacetGrid(object):
df = pd.DataFrame(dict(x=rs.normal(size=60),
y=rs.gamma(4, size=60),
a=np.repeat(list("abc"), 20),
b=np.tile(list("mn"), 30),
c=np.tile(list("tuv"), 20),
d=np.tile(list("abcdefghij"), 6)))
def test_self_data(self):
g = ag.FacetGrid(self.df)
nt.assert_is(g.data, self.df)
plt.close("all")
def test_self_fig(self):
g = ag.FacetGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
plt.close("all")
def test_self_axes(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
plt.close("all")
def test_axes_array_size(self):
g1 = ag.FacetGrid(self.df)
nt.assert_equal(g1.axes.shape, (1, 1))
g2 = ag.FacetGrid(self.df, row="a")
nt.assert_equal(g2.axes.shape, (3, 1))
g3 = ag.FacetGrid(self.df, col="b")
nt.assert_equal(g3.axes.shape, (1, 2))
g4 = ag.FacetGrid(self.df, hue="c")
nt.assert_equal(g4.axes.shape, (1, 1))
g5 = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g5.axes.shape, (3, 2))
for ax in g5.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
plt.close("all")
def test_single_axes(self):
g1 = ag.FacetGrid(self.df)
nt.assert_is_instance(g1.ax, plt.Axes)
g2 = ag.FacetGrid(self.df, row="a")
with nt.assert_raises(AttributeError):
g2.ax
g3 = ag.FacetGrid(self.df, col="a")
with nt.assert_raises(AttributeError):
g3.ax
g4 = ag.FacetGrid(self.df, col="a", row="b")
with nt.assert_raises(AttributeError):
g4.ax
def test_col_wrap(self):
g = ag.FacetGrid(self.df, col="d")
nt.assert_equal(g.axes.shape, (1, 10))
nt.assert_is(g.facet_axis(0, 8), g.axes[0, 8])
g_wrap = ag.FacetGrid(self.df, col="d", col_wrap=4)
nt.assert_equal(g_wrap.axes.shape, (10,))
nt.assert_is(g_wrap.facet_axis(0, 8), g_wrap.axes[8])
nt.assert_equal(g_wrap._ncol, 4)
nt.assert_equal(g_wrap._nrow, 3)
with nt.assert_raises(ValueError):
g = ag.FacetGrid(self.df, row="b", col="d", col_wrap=4)
df = self.df.copy()
df.loc[df.d == "j"] = np.nan
g_missing = ag.FacetGrid(df, col="d")
nt.assert_equal(g_missing.axes.shape, (1, 9))
g_missing_wrap = ag.FacetGrid(df, col="d", col_wrap=4)
nt.assert_equal(g_missing_wrap.axes.shape, (9,))
plt.close("all")
def test_normal_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df)
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="c")
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="a", row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, g.axes[:-1, 1:].flat)
plt.close("all")
def test_wrapped_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df, col="a", col_wrap=2)
npt.assert_array_equal(g._bottom_axes,
g.axes[np.array([1, 2])].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:1].flat)
npt.assert_array_equal(g._left_axes, g.axes[np.array([0, 2])].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[np.array([1])].flat)
npt.assert_array_equal(g._inner_axes, null)
plt.close("all")
def test_figure_size(self):
g = ag.FacetGrid(self.df, row="a", col="b")
npt.assert_array_equal(g.fig.get_size_inches(), (6, 9))
g = ag.FacetGrid(self.df, row="a", col="b", size=6)
npt.assert_array_equal(g.fig.get_size_inches(), (12, 18))
g = ag.FacetGrid(self.df, col="c", size=4, aspect=.5)
npt.assert_array_equal(g.fig.get_size_inches(), (6, 4))
plt.close("all")
def test_figure_size_with_legend(self):
g1 = ag.FacetGrid(self.df, col="a", hue="c", size=4, aspect=.5)
npt.assert_array_equal(g1.fig.get_size_inches(), (6, 4))
g1.add_legend()
nt.assert_greater(g1.fig.get_size_inches()[0], 6)
g2 = ag.FacetGrid(self.df, col="a", hue="c", size=4, aspect=.5,
legend_out=False)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
g2.add_legend()
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
plt.close("all")
def test_legend_data(self):
g1 = ag.FacetGrid(self.df, hue="a")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=3)
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(a_levels))
for label, level in zip(labels, a_levels):
nt.assert_equal(label.get_text(), level)
plt.close("all")
def test_legend_data_missing_level(self):
g1 = ag.FacetGrid(self.df, hue="a", hue_order=list("azbc"))
g1.map(plt.plot, "x", "y")
g1.add_legend()
b, g, r, p = color_palette(n_colors=4)
palette = [b, r, p]
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), 4)
for label, level in zip(labels, list("azbc")):
nt.assert_equal(label.get_text(), level)
plt.close("all")
def test_get_boolean_legend_data(self):
self.df["b_bool"] = self.df.b == "m"
g1 = ag.FacetGrid(self.df, hue="b_bool")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=2)
nt.assert_equal(g1._legend.get_title().get_text(), "b_bool")
b_levels = list(map(str, categorical_order(self.df.b_bool)))
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(b_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(b_levels))
for label, level in zip(labels, b_levels):
nt.assert_equal(label.get_text(), level)
plt.close("all")
def test_legend_options(self):
g1 = ag.FacetGrid(self.df, hue="b")
g1.map(plt.plot, "x", "y")
g1.add_legend()
def test_legendout_with_colwrap(self):
g = ag.FacetGrid(self.df, col="d", hue='b',
col_wrap=4, legend_out=False)
g.map(plt.plot, "x", "y", linewidth=3)
g.add_legend()
def test_subplot_kws(self):
g = ag.FacetGrid(self.df, subplot_kws=dict(axisbg="blue"))
for ax in g.axes.flat:
nt.assert_equal(ax.get_axis_bgcolor(), "blue")
@skipif(old_matplotlib)
def test_gridspec_kws(self):
ratios = [3, 1, 2]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios, height_ratios=ratios)
g = ag.FacetGrid(self.df, col='c', row='a', gridspec_kws=gskws)
# clear out all ticks
for ax in g.axes.flat:
ax.set_xticks([])
ax.set_yticks([])
g.fig.tight_layout()
widths, heights = np.meshgrid(sizes, sizes)
for n, ax in enumerate(g.axes.flat):
npt.assert_almost_equal(
ax.get_position().width,
widths.flatten()[n],
decimal=2
)
npt.assert_almost_equal(
ax.get_position().height,
heights.flatten()[n],
decimal=2
)
@skipif(old_matplotlib)
def test_gridspec_kws_col_wrap(self):
ratios = [3, 1, 2, 1, 1]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='d',
col_wrap=5, gridspec_kws=gskws)
@skipif(not old_matplotlib)
    def test_gridspec_kws_old_mpl(self):
ratios = [3, 1, 2]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios, height_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='c',
row='a', gridspec_kws=gskws)
def test_data_generator(self):
g = ag.FacetGrid(self.df, row="a")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
tup, data = d[1]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
g = ag.FacetGrid(self.df, row="a", col="b")
d = list(g.facet_data())
nt.assert_equal(len(d), 6)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "m").all())
tup, data = d[1]
nt.assert_equal(tup, (0, 1, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "n").all())
tup, data = d[2]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
nt.assert_true((data["b"] == "m").all())
g = ag.FacetGrid(self.df, hue="c")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[1]
nt.assert_equal(tup, (0, 0, 1))
nt.assert_true((data["c"] == "u").all())
plt.close("all")
def test_map(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
g.map(plt.plot, "x", "y", linewidth=3)
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linewidth(), 3)
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_map_dataframe(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
plot = lambda x, y, data=None, **kws: plt.plot(data[x], data[y], **kws)
g.map_dataframe(plot, "x", "y", linestyle="--")
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linestyle(), "--")
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_set(self):
g = ag.FacetGrid(self.df, row="a", col="b")
xlim = (-2, 5)
ylim = (3, 6)
xticks = [-2, 0, 3, 5]
yticks = [3, 4.5, 6]
g.set(xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks)
for ax in g.axes.flat:
npt.assert_array_equal(ax.get_xlim(), xlim)
npt.assert_array_equal(ax.get_ylim(), ylim)
npt.assert_array_equal(ax.get_xticks(), xticks)
npt.assert_array_equal(ax.get_yticks(), yticks)
plt.close("all")
def test_set_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "a = a | b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "a = a | b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "a = b | b = m")
# Test a provided title
g.set_titles("{row_var} == {row_name} \/ {col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "a == a \/ b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "a == a \/ b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "a == b \/ b == m")
# Test a single row
g = ag.FacetGrid(self.df, col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
# test with dropna=False
g = ag.FacetGrid(self.df, col="b", hue="b", dropna=False)
g.map(plt.plot, 'x', 'y')
plt.close("all")
def test_set_titles_margin_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b", margin_titles=True)
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
# Test the row "titles"
nt.assert_equal(g.axes[0, 1].texts[0].get_text(), "a = a")
nt.assert_equal(g.axes[1, 1].texts[0].get_text(), "a = b")
# Test a provided title
g.set_titles(col_template="{col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
plt.close("all")
def test_set_ticklabels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = [l.get_text() + "h" for l in g.axes[1, 0].get_xticklabels()]
ylab = [l.get_text() for l in g.axes[1, 0].get_yticklabels()]
g.set_xticklabels(xlab)
g.set_yticklabels(rotation=90)
got_x = [l.get_text() + "h" for l in g.axes[1, 1].get_xticklabels()]
got_y = [l.get_text() for l in g.axes[0, 0].get_yticklabels()]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
x, y = np.arange(10), np.arange(10)
df = pd.DataFrame(np.c_[x, y], columns=["x", "y"])
g = ag.FacetGrid(df).map(pointplot, "x", "y")
g.set_xticklabels(step=2)
got_x = [int(l.get_text()) for l in g.axes[0, 0].get_xticklabels()]
npt.assert_array_equal(x[::2], got_x)
g = ag.FacetGrid(self.df, col="d", col_wrap=5)
g.map(plt.plot, "x", "y")
g.set_xticklabels(rotation=45)
g.set_yticklabels(rotation=75)
for ax in g._bottom_axes:
for l in ax.get_xticklabels():
nt.assert_equal(l.get_rotation(), 45)
for ax in g._left_axes:
for l in ax.get_yticklabels():
nt.assert_equal(l.get_rotation(), 75)
plt.close("all")
def test_set_axis_labels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = 'xx'
ylab = 'yy'
g.set_axis_labels(xlab, ylab)
got_x = [ax.get_xlabel() for ax in g.axes[-1, :]]
got_y = [ax.get_ylabel() for ax in g.axes[:, 0]]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
plt.close("all")
def test_axis_lims(self):
g = ag.FacetGrid(self.df, row="a", col="b", xlim=(0, 4), ylim=(-2, 3))
nt.assert_equal(g.axes[0, 0].get_xlim(), (0, 4))
nt.assert_equal(g.axes[0, 0].get_ylim(), (-2, 3))
plt.close("all")
def test_data_orders(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g.row_names, list("abc"))
nt.assert_equal(g.col_names, list("mn"))
nt.assert_equal(g.hue_names, list("tuv"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bca"),
col_order=list("nm"),
hue_order=list("vtu"))
nt.assert_equal(g.row_names, list("bca"))
nt.assert_equal(g.col_names, list("nm"))
nt.assert_equal(g.hue_names, list("vtu"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bcda"),
col_order=list("nom"),
hue_order=list("qvtu"))
nt.assert_equal(g.row_names, list("bcda"))
nt.assert_equal(g.col_names, list("nom"))
nt.assert_equal(g.hue_names, list("qvtu"))
nt.assert_equal(g.axes.shape, (4, 3))
plt.close("all")
def test_palette(self):
rcmod.set()
g = ag.FacetGrid(self.df, hue="c")
nt.assert_equal(g._colors, color_palette(n_colors=3))
g = ag.FacetGrid(self.df, hue="d")
nt.assert_equal(g._colors, color_palette("husl", 10))
g = ag.FacetGrid(self.df, hue="c", palette="Set2")
nt.assert_equal(g._colors, color_palette("Set2", 3))
dict_pal = dict(t="red", u="green", v="blue")
list_pal = color_palette(["red", "green", "blue"], 3)
g = ag.FacetGrid(self.df, hue="c", palette=dict_pal)
nt.assert_equal(g._colors, list_pal)
list_pal = color_palette(["green", "blue", "red"], 3)
g = ag.FacetGrid(self.df, hue="c", hue_order=list("uvt"),
palette=dict_pal)
nt.assert_equal(g._colors, list_pal)
plt.close("all")
def test_hue_kws(self):
kws = dict(marker=["o", "s", "D"])
g = ag.FacetGrid(self.df, hue="c", hue_kws=kws)
g.map(plt.plot, "x", "y")
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
def test_dropna(self):
df = self.df.copy()
hasna = pd.Series(np.tile(np.arange(6), 10), dtype=np.float)
hasna[hasna == 5] = np.nan
df["hasna"] = hasna
g = ag.FacetGrid(df, dropna=False, row="hasna")
nt.assert_equal(g._not_na.sum(), 60)
g = ag.FacetGrid(df, dropna=True, row="hasna")
nt.assert_equal(g._not_na.sum(), 50)
plt.close("all")
@classmethod
def teardown_class(cls):
"""Ensure that all figures are closed on exit."""
plt.close("all")
class TestPairGrid(object):
rs = np.random.RandomState(sum(map(ord, "PairGrid")))
df = pd.DataFrame(dict(x=rs.normal(size=80),
y=rs.randint(0, 4, size=(80)),
z=rs.gamma(3, size=80),
a=np.repeat(list("abcd"), 20),
b=np.repeat(list("abcdefgh"), 10)))
def test_self_data(self):
g = ag.PairGrid(self.df)
nt.assert_is(g.data, self.df)
plt.close("all")
def test_ignore_datelike_data(self):
df = self.df.copy()
df['date'] = pd.date_range('2010-01-01', periods=len(df), freq='d')
result = ag.PairGrid(self.df).data
expected = df.drop('date', axis=1)
tm.assert_frame_equal(result, expected)
plt.close("all")
def test_self_fig(self):
g = ag.PairGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
plt.close("all")
def test_self_axes(self):
g = ag.PairGrid(self.df)
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
plt.close("all")
def test_default_axes(self):
g = ag.PairGrid(self.df)
nt.assert_equal(g.axes.shape, (3, 3))
nt.assert_equal(g.x_vars, ["x", "y", "z"])
nt.assert_equal(g.y_vars, ["x", "y", "z"])
nt.assert_true(g.square_grid)
plt.close("all")
def test_specific_square_axes(self):
vars = ["z", "x"]
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, vars)
nt.assert_equal(g.y_vars, vars)
nt.assert_true(g.square_grid)
plt.close("all")
def test_specific_nonsquare_axes(self):
x_vars = ["x", "y"]
y_vars = ["z", "y", "x"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, x_vars)
nt.assert_equal(g.y_vars, y_vars)
nt.assert_true(not g.square_grid)
x_vars = ["x", "y"]
y_vars = "z"
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
plt.close("all")
def test_specific_square_axes_with_array(self):
vars = np.array(["z", "x"])
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, list(vars))
nt.assert_equal(g.y_vars, list(vars))
nt.assert_true(g.square_grid)
plt.close("all")
def test_specific_nonsquare_axes_with_array(self):
x_vars = np.array(["x", "y"])
y_vars = np.array(["z", "y", "x"])
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
plt.close("all")
def test_size(self):
g1 = ag.PairGrid(self.df, size=3)
npt.assert_array_equal(g1.fig.get_size_inches(), (9, 9))
g2 = ag.PairGrid(self.df, size=4, aspect=.5)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 12))
g3 = ag.PairGrid(self.df, y_vars=["z"], x_vars=["x", "y"],
size=2, aspect=2)
npt.assert_array_equal(g3.fig.get_size_inches(), (8, 2))
plt.close("all")
def test_map(self):
vars = ["x", "y", "z"]
g1 = ag.PairGrid(self.df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(self.df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate("abcd"):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
plt.close("all")
def test_map_nonsquare(self):
x_vars = ["x"]
y_vars = ["y", "z"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
g.map(plt.scatter)
x_in = self.df.x
for i, i_var in enumerate(y_vars):
ax = g.axes[i, 0]
y_in = self.df[i_var]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
plt.close("all")
def test_map_lower(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_lower(plt.scatter)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.triu_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
def test_map_upper(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_upper(plt.scatter)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_map_diag(self):
g1 = ag.PairGrid(self.df)
g1.map_diag(plt.hist)
for ax in g1.diag_axes:
nt.assert_equal(len(ax.patches), 10)
g2 = ag.PairGrid(self.df)
g2.map_diag(plt.hist, bins=15)
for ax in g2.diag_axes:
nt.assert_equal(len(ax.patches), 15)
g3 = ag.PairGrid(self.df, hue="a")
g3.map_diag(plt.hist)
for ax in g3.diag_axes:
nt.assert_equal(len(ax.patches), 40)
plt.close("all")
@skipif(old_matplotlib)
def test_map_diag_and_offdiag(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_offdiag(plt.scatter)
g.map_diag(plt.hist)
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
def test_palette(self):
rcmod.set()
g = ag.PairGrid(self.df, hue="a")
nt.assert_equal(g.palette, color_palette(n_colors=4))
g = ag.PairGrid(self.df, hue="b")
nt.assert_equal(g.palette, color_palette("husl", 8))
g = ag.PairGrid(self.df, hue="a", palette="Set2")
nt.assert_equal(g.palette, color_palette("Set2", 4))
dict_pal = dict(a="red", b="green", c="blue", d="purple")
list_pal = color_palette(["red", "green", "blue", "purple"], 4)
g = ag.PairGrid(self.df, hue="a", palette=dict_pal)
nt.assert_equal(g.palette, list_pal)
list_pal = color_palette(["purple", "blue", "red", "green"], 4)
g = ag.PairGrid(self.df, hue="a", hue_order=list("dcab"),
palette=dict_pal)
nt.assert_equal(g.palette, list_pal)
plt.close("all")
def test_hue_kws(self):
kws = dict(marker=["o", "s", "d", "+"])
g = ag.PairGrid(self.df, hue="a", hue_kws=kws)
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
g = ag.PairGrid(self.df, hue="a", hue_kws=kws,
hue_order=list("dcab"))
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
plt.close("all")
@skipif(old_matplotlib)
def test_hue_order(self):
order = list("dcab")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
@skipif(old_matplotlib)
def test_hue_order_missing_level(self):
order = list("dcaeb")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
def test_nondefault_index(self):
df = self.df.copy().set_index("b")
vars = ["x", "y", "z"]
g1 = ag.PairGrid(df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate("abcd"):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot(self):
vars = ["x", "y", "z"]
g = pairplot(self.df)
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot_reg(self):
vars = ["x", "y", "z"]
g = pairplot(self.df, kind="reg")
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot_kde(self):
vars = ["x", "y", "z"]
g = pairplot(self.df, diag_kind="kde")
for ax in g.diag_axes:
nt.assert_equal(len(ax.lines), 1)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
plt.close("all")
@skipif(old_matplotlib)
def test_pairplot_markers(self):
vars = ["x", "y", "z"]
markers = ["o", "x", "s", "d"]
g = pairplot(self.df, hue="a", vars=vars, markers=markers)
nt.assert_equal(g.hue_kws["marker"], markers)
plt.close("all")
with nt.assert_raises(ValueError):
g = pairplot(self.df, hue="a", vars=vars, markers=markers[:-2])
@classmethod
def teardown_class(cls):
"""Ensure that all figures are closed on exit."""
plt.close("all")
class TestJointGrid(object):
rs = np.random.RandomState(sum(map(ord, "JointGrid")))
x = rs.randn(100)
y = rs.randn(100)
x_na = x.copy()
x_na[10] = np.nan
x_na[20] = np.nan
data = pd.DataFrame(dict(x=x, y=y, x_na=x_na))
def test_margin_grid_from_arrays(self):
g = ag.JointGrid(self.x, self.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
plt.close("all")
def test_margin_grid_from_series(self):
g = ag.JointGrid(self.data.x, self.data.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
plt.close("all")
def test_margin_grid_from_dataframe(self):
g = ag.JointGrid("x", "y", self.data)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
plt.close("all")
def test_margin_grid_axis_labels(self):
g = ag.JointGrid("x", "y", self.data)
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x")
nt.assert_equal(ylabel, "y")
g.set_axis_labels("x variable", "y variable")
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x variable")
nt.assert_equal(ylabel, "y variable")
plt.close("all")
def test_dropna(self):
g = ag.JointGrid("x_na", "y", self.data, dropna=False)
nt.assert_equal(len(g.x), len(self.x_na))
g = ag.JointGrid("x_na", "y", self.data, dropna=True)
nt.assert_equal(len(g.x), pd.notnull(self.x_na).sum())
plt.close("all")
def test_axlims(self):
lim = (-3, 3)
g = ag.JointGrid("x", "y", self.data, xlim=lim, ylim=lim)
nt.assert_equal(g.ax_joint.get_xlim(), lim)
nt.assert_equal(g.ax_joint.get_ylim(), lim)
nt.assert_equal(g.ax_marg_x.get_xlim(), lim)
nt.assert_equal(g.ax_marg_y.get_ylim(), lim)
def test_marginal_ticks(self):
g = ag.JointGrid("x", "y", self.data)
nt.assert_true(~len(g.ax_marg_x.get_xticks()))
nt.assert_true(~len(g.ax_marg_y.get_yticks()))
plt.close("all")
def test_bivariate_plot(self):
g = ag.JointGrid("x", "y", self.data)
g.plot_joint(plt.plot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.y)
plt.close("all")
def test_univariate_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot_marginals(kdeplot)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
plt.close("all")
def test_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot(plt.plot, kdeplot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.x)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
plt.close("all")
def test_annotate(self):
g = ag.JointGrid("x", "y", self.data)
rp = stats.pearsonr(self.x, self.y)
g.annotate(stats.pearsonr)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "pearsonr = %.2g; p = %.2g" % rp)
g.annotate(stats.pearsonr, stat="correlation")
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "correlation = %.2g; p = %.2g" % rp)
def rsquared(x, y):
return stats.pearsonr(x, y)[0] ** 2
r2 = rsquared(self.x, self.y)
g.annotate(rsquared)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "rsquared = %.2g" % r2)
template = "{stat} = {val:.3g} (p = {p:.3g})"
g.annotate(stats.pearsonr, template=template)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, template.format(stat="pearsonr",
val=rp[0], p=rp[1]))
plt.close("all")
def test_space(self):
g = ag.JointGrid("x", "y", self.data, space=0)
joint_bounds = g.ax_joint.bbox.bounds
marg_x_bounds = g.ax_marg_x.bbox.bounds
marg_y_bounds = g.ax_marg_y.bbox.bounds
nt.assert_equal(joint_bounds[2], marg_x_bounds[2])
nt.assert_equal(joint_bounds[3], marg_y_bounds[3])
@classmethod
def teardown_class(cls):
"""Ensure that all figures are closed on exit."""
plt.close("all")
| bsd-3-clause |
treycausey/scikit-learn | examples/svm/plot_svm_regression.py | 4 | 1429 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
###############################################################################
# Generate sample data
import numpy as np
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
from sklearn.svm import SVR
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
import pylab as pl
pl.scatter(X, y, c='k', label='data')
pl.hold('on')
pl.plot(X, y_rbf, c='g', label='RBF model')
pl.plot(X, y_lin, c='r', label='Linear model')
pl.plot(X, y_poly, c='b', label='Polynomial model')
pl.xlabel('data')
pl.ylabel('target')
pl.title('Support Vector Regression')
pl.legend()
pl.show()
| bsd-3-clause |
Harefoot/TurboQUALTX | ET_Utils/WASP_Utils/__init__.py | 1 | 11151 |
import numpy as np
import pandas as pd
import os
import datetime
from scipy.interpolate import interp1d
def GetDateTimeVal(df):
DateTimeVal = []
YEAR = np.asarray(df['YEAR'],dtype = int)
MONTH = np.asarray(df['MONTH'],dtype = int)
DAY = np.asarray(df['DAY'],dtype = int)
HOUR = np.asarray(df['HOUR'],dtype = int)
MINUTE = np.asarray(df['MINUTE'],dtype = int)
for i in range(0,len(df)):
DateTimeVal.append(datetime.datetime(YEAR[i],MONTH[i],DAY[i],HOUR[i],MINUTE[i]))
return DateTimeVal
#Function to unpivot a dataframe
def unpivot(df,Keep_VARIABLEs):
#Unpivoting
N, K = df.shape
data = {'VARIABLE' : np.asarray(df.columns).repeat(N),
'VALUE' : df.values.ravel('F')}
up = pd.DataFrame(data, columns=['VARIABLE', 'VALUE'])
#Append Keep_VARIABLEs as columns
for KV in Keep_VARIABLEs:
#np.tile constructs an array by repeating A the number of times given by reps
up[KV] = np.tile(np.asarray(df[KV]), K)
#filter out the VARIABLEs that we are already including as columns
for KV in Keep_VARIABLEs:
up = up[(up['VARIABLE'] != KV)]
#Reorder the columns in the data frame such that the Keep_VARIABLEs
#are on the left hand side and then followed by VARIABLE and VALUE
reorder = Keep_VARIABLEs
reorder.append('VARIABLE')
reorder.append('VALUE')
up = up[reorder]
return up
def parse_WASPdays_datenum(df,basedate):
DAYZ = np.asarray(df['DAYS'],dtype = float)
N = len(df['DAYS'])
YEAR = np.zeros(N)
MONTH = np.zeros(N)
DAY = np.zeros(N)
HOUR = np.zeros(N)
MINUTE = np.zeros(N)
YYYYMMDD = np.zeros(N)
DATETIME = []
for i in range(0,N):
DAYe = DAYZ[i]
python_datetime = basedate + datetime.timedelta(DAYe)
YEAR[i] = python_datetime.year
MONTH[i] = python_datetime.month
DAY[i] = python_datetime.day
HOUR[i] = python_datetime.hour
MINUTE[i] = python_datetime.minute
YYYYMMDD[i] = YEAR[i]*10000+MONTH[i]*100+DAY[i]
DATETIME.append(python_datetime.strftime('%m/%d/%Y %H:%M'))
#put it all in a dataframe
Sdict = {'YEAR':YEAR,'MONTH':MONTH,'DAY':DAY,'HOUR':HOUR,'MINUTE':MINUTE,'YYYYMMDD':YYYYMMDD,'DATETIME':DATETIME}
#Convert it to a dataframe
Datedf = pd.DataFrame(Sdict)
df = pd.concat([Datedf, df], axis=1)
return df
def Dates_to_MatlabDateNum(Inputdf,DateField):
CCMODdf = Inputdf.copy()
CCMODdf = CCMODdf.reset_index()
DateTimeArr = CCMODdf[DateField]
MatlabDateNum = []
#a = datetime.datetime(1899, 12, 31,0,0)
#fmt = '%Y-%m-%d %H:%M:%S %Z'
for i in range(len(CCMODdf)):
DateTimeElement= DateTimeArr[i]
MatlabDateNum.append(datetime.datetime.toordinal(DateTimeElement)+366.+DateTimeElement.hour/24.+DateTimeElement.minute/3600.)
CCMODdf['MATLABDATENUM'] = MatlabDateNum
return CCMODdf
def interpolate(x,y,x1):
xnoNaN = x[np.isfinite(y) & np.isfinite(x)]
ynoNaN = y[np.isfinite(y) & np.isfinite(x)]
inds = xnoNaN.argsort()
ynoNaN = ynoNaN[inds]
xnoNaN = xnoNaN[inds]
f = interp1d(xnoNaN,ynoNaN,bounds_error = False)
return f(x1)
#Function that parses individual WASP csv files and unpivots them
def parseWASPcsv(modelpath,csvfile):
print csvfile
#Get parameter from csvfile name
parameter = csvfile.split('.')[0]
#Get num columns
df = pd.read_csv(os.path.join(modelpath,csvfile), skiprows = 1)
num_Segs = len(df.columns)-1#2
column_names = ['Days']
for i in range(1,num_Segs+1):
column_names.append('Seg'+str(int(i)))
column_names.append('STORETCODE')
#print column_names
column_names = np.asarray(column_names)
df = pd.read_csv(os.path.join(modelpath,csvfile), skiprows = 1, header = None, names = column_names) #index_col = 0,
df['STORETCODE'] = np.asarray(df['STORETCODE'],dtype = '|S100')
df['STORETCODE'][:] = parameter
Keep_VARIABLEs = ['Days','STORETCODE']
up = unpivot(df,Keep_VARIABLEs)
return up#,mindepth_filter
#def prep_model_results(modelpath, outputpath, csvfiles_of_interest,SegmentsOfInterest, calibration_csv, basedate, mindepth, num_Segs):
def prep_model_results(modelpath, outputpath, pdf,SegmentsOfInterest, calibration_csv, basedate):
#Get a list of which files to read
csvfiles_of_interest = []
#parameters_of_interest = pdf['PlotParameter']
#for parameter in parameters_of_interest:
for i in pdf.index:
if pdf['PlotFlag'][i]==1:
csvfiles_of_interest.append(pdf['PlotParameter'][i] + '.csv')
#Use only unique values, see http://stackoverflow.com/questions/12897374/get-unique-values-from-a-list-in-python
csvfiles_of_interest = list(set(csvfiles_of_interest))
#Need to find way to figure out the number of segments automatically
if os.path.exists(outputpath) == False:
os.mkdir(outputpath)
os.chdir(modelpath)
#Use segment depth as base file
#Basedf = parseWASPcsv(modelpath,'Segment_Depth.csv').reset_index()
#Basedf,mindepth_filter = parseWASPcsv_SegDepth(modelpath,'Segment_Depth.csv',mindepth)
Basedf = parseWASPcsv(modelpath,'Segment_Depth.csv')
Basedf = Basedf.reset_index()
Basedf.columns = ['index','DAYS', 'STORETCODE', 'SITECODE','SEGMENT_DEPTH']
Basedf = Basedf[['DAYS', 'SITECODE','SEGMENT_DEPTH']]
Basedf = parse_WASPdays_datenum(Basedf,basedate)
#Define timeseries columns
Tcols = ['AGENCY','SITECODE','MONTH','DAY','YEAR','HOUR','MINUTE','DATE','YYYYMMDD','QUALIFIER',
'SEGMENT_DEPTH','DATATYPE','STORETCODE','VALUE_ORIG','STORETCODE_NEW','VALUE_NEW']
#Basedf['MIN_SEG_DEP'] = match['SEGMENT_DEPTH_min']
Basedf['AGENCY'] = np.asarray(Basedf['SITECODE'], dtype = str)
Basedf['AGENCY'][:] = 'WASP model' #+ modelpath
Basedf['DATATYPE'] = np.asarray(Basedf['SITECODE'], dtype = str)
Basedf['DATATYPE'][:] = 'WASP_results'
Basedf['DATE'] = Basedf['DATETIME']
Basedf['QUALIFIER'] = Basedf['SITECODE']
Basedf['QUALIFIER'] = ''
#Basedf['QUALIFIER'][Basedf['SEGMENT_DEPTH']<= mindepth] = 'DRY SEGMENT'
#Basedf.to_csv(os.path.join(outputpath,'Basedf.csv'),index = False)
#If you want to get a list of all WASP outputfiles
# just print allcsvfiles
#Process all other csvfiles
#for csvfile in allcsvfiles[(allcsvfiles !='SEGMENT_DEPTH')]:
Builder = Basedf.copy()
for csvfile in csvfiles_of_interest:
df = parseWASPcsv(modelpath,csvfile).reset_index()
df2 = pd.concat([Basedf, df[['STORETCODE','VALUE']]], axis=1)
df2['VALUE_ORIG'] = df2['VALUE']
df2['VALUE_NEW'] = df2['VALUE']
df2['STORETCODE_NEW'] = df2['STORETCODE']
df2 = df2[Tcols]#.reset_index()
#df2.to_csv(os.path.join(outputpath,'TimeSeries_'+csvfile),index = False)
col_name = df2['STORETCODE'][0]
Builder[col_name] = df2['VALUE_NEW']
Builder['TKN'] = Builder['Total_Organic_N'] +Builder['Ammonia']
Builder.to_csv(os.path.join(outputpath,'TimeSeries_WQ_multicolumns.csv'),index = False)
#When calibrating, only extract cells from segments of interest
Boris = Builder[Builder['SITECODE'].isin(SegmentsOfInterest)]
#Keep_VARIABLEs = ['DATETIME',' DAY','HOUR','MINUTE',' MONTH','YEAR','YYYYMMDD','DAYS','SITECODE','IMPACT_BY_DRY_FLAG','SEGMENT_DEPTH','AGENCY','DATATYPE','DATE','QUALIFIER','Unstable_flag']
Keep_VARIABLEs = ['AGENCY','SITECODE','MONTH','DAY','YEAR','HOUR','MINUTE','DATE','DATETIME','YYYYMMDD','QUALIFIER','SEGMENT_DEPTH','DATATYPE']
Calibdf = unpivot(Boris,Keep_VARIABLEs)
Calibdf['STORETCODE'] = Calibdf['VARIABLE']
Calibdf['VALUE_ORIG'] = Calibdf['VALUE']
Calibdf['STORETCODE_NEW'] = Calibdf['VARIABLE']
Calibdf['VALUE_NEW'] = Calibdf['VALUE']
#Tcols.append('Unstable_flag')
Calibdf = Calibdf[Tcols]
Calibdf = Calibdf[Calibdf['STORETCODE']!='DAYS']
columns = list(Calibdf.columns)
Calibdf = pd.merge(Calibdf,pdf,how = 'inner', left_on = 'STORETCODE', right_on = 'PlotParameter')
Calibdf['VARIABLE']=Calibdf['PlotParameter']
Calibdf['UNIT']=Calibdf['Units']
columns.append('VARIABLE')
columns.append('UNIT')
Calibdf = Calibdf[columns]
Calibdf['DEPTH_IN_METERS'] = Calibdf['SEGMENT_DEPTH'][:]/2
#Calibdf = Calibdf.reset_index()
Calibdf.to_csv(calibration_csv,index = False)
#---------------------------------------------------------------------------------------------------------------------------------
def main_processor(modelpath, pdf, prep_model_results_flag,SegmentsOfInterest,basedate):
outputpath = os.path.join(modelpath,'parsed')
#sitecodes_of_interest = ['OsoPlant','HRI_WestOso','HRI_OsoWard','HRI_OsoMouthShallow','HRI_NAS','HRI_Hwy358']
#sitecodes_plot_names = ['Oso Plant Outfall Channel','West Oso','Ward','Near Oso Mouth','NAS','Hwy358']
#SiteNames = ['Basin','Waller Ck','Lamar & South First Bridges','Barton Ck','Redbud Isle','Tom Miller Dam']
calibration_csv = os.path.join(outputpath,'TimeSeries_Calibration.csv')
#basedataYYYYMMDD = 20120701
#Plot_OsoMouthDeep_flag =0 #specify whether to plot Oso Mouth Deep
if prep_model_results_flag == 1:
print 'Prepare calibration file'
#prep_model_results(modelpath, outputpath, csvfiles_of_interest,SegmentsOfInterest, calibration_csv, basedate,mindepth,num_Segs)
prep_model_results(modelpath, outputpath, pdf,SegmentsOfInterest, calibration_csv, basedate)
#print 'Read model results'
#Calibdf = pd.read_csv(calibration_csv)
#Calibdf['DATETIMEVAL'] = GetDateTimeVal(Calibdf)
#============================================================MAIN DECK=======================================================================================
def MAIN_DECK(modelpaths, SegmentsOfInterest,basedate):
print 'Start processing...'
#modelpaths = [r'C:\G\LS20_Seg43_162MGD_20BOD5_20_4_5_4_20150615_cent_adj3']
prep_model_results_flag = 1
parameter_key_file =os.path.join(os.path.dirname(__file__),'ParameterKey.csv')
print parameter_key_file
#parameter_key_file = r'D:\APAI\M_drive\Projects\0382\022-05\2-0 Wrk Prod\Models\WASP\LBL\PostProcessing\Calibration\ParameterKey.csv'
pdf = pd.read_csv(parameter_key_file)
#pdf = pdf[pdf['PlotFlag']==1]
for modelpath in modelpaths:
print modelpath
main_processor(modelpath, pdf, prep_model_results_flag,SegmentsOfInterest,basedate)
print "done!"
if __name__ == '__main__':
# Just as an example
modelpaths = [r'C:\G_Drive\LadyBirdLake\WASP\20151216_1049']
SegmentsOfInterest = ['Seg31','Seg57','Seg67','Seg69','Seg73','Seg80','Seg117','Seg124']
basedate = datetime.datetime(2012,7,1,0,0)
MAIN_DECK(modelpaths, SegmentsOfInterest,basedate) | mit |
PMitura/smiles-neural-network | rnn/rnn.py | 1 | 14202 | import data, utility, metrics
import time
import numpy as np
from math import sqrt, exp, log, ceil
from scipy.stats.stats import pearsonr
from sklearn.metrics import roc_auc_score
import db.db as db
import visualization
from config import config as cc
import yaml
import pandas as pd
import socket
# not used, to relieve MetaCentrum of some dependencies
# TODO: Remove unused imports after experiments are done
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, LSTM, AveragePooling1D
from keras.layers import TimeDistributed, SimpleRNN, GRU
from keras.layers import BatchNormalization, Embedding, Merge
from keras.optimizers import Adam, RMSprop, Adadelta, Adagrad
from keras.regularizers import l2, activity_l2
import keras.callbacks
# from keras.regularizers import l1
# handy aliases for config
RP = cc.exp['params']['rnn']
RD = cc.exp['params']['data']
RG = cc.exp['grid']
# manual eval where needed
RP['chained_labels'] = eval(str(cc.exp['params']['rnn']['chained_labels']))
RP['chained_predict'] = eval(str(cc.exp['params']['rnn']['chained_predict']))
RP['chained_test_labels'] = eval(str(cc.exp['params']['rnn']['chained_test_labels']))
RP['freeze_idxs'] = eval(str(cc.exp['params']['rnn']['freeze_idxs']))
RP['label_idxs'] = eval(str(cc.exp['params']['rnn']['label_idxs']))
OPTIMIZER = Adam(lr = RP['learning_rate'], clipnorm = 1.)
def configureModel(input, outputLen = len(RD['labels'])):
print(' Initializing and compiling...')
alphaSize = input.shape[2]
model = Sequential()
'''
if RD['use_embedding']:
# second value in nomiSize tuple is shift while using embedding
model.add(Embedding(1 << nomiSize[1], RP['embedding_outputs']))
model.add(TimeDistributed(Dense(int(RP['td_layer_multiplier'] * (alphaSize +
nomiSize[0])), activation = 'tanh', trainable = RP['trainable_inner'])))
else:
'''
model.add(TimeDistributed(Dense(300*RG['ratios'][0], activation = 'tanh', trainable = RP['trainable_inner']), input_shape = (None, alphaSize )))
model.add(Dropout(0.30))
model.add(GRU(300*RG['ratios'][1], trainable = RP['trainable_inner'], return_sequences = True))
model.add(Activation('tanh', trainable = RP['trainable_inner']))
model.add(Dropout(0.30))
model.add(GRU(300*RG['ratios'][2], trainable = RP['trainable_inner']))
model.add(Activation('tanh', trainable = RP['trainable_inner']))
model.add(Dropout(0.30))
model.add(Dense(outputLen))
# molweight
# model = utility.loadModel('b3d9609da78bfbf0ad1a62ee6740df3b52f104b4', 'mol_')
# all compounds
# model = utility.loadModel('eab15a05a70b35d119c02fcc36b1cfaf27a0f36a', 'mol_')
# maccs
# model = utility.loadModel('67b51a1543b5d32b05671e4a08d193eed702ca54', 'mol_')
# model.pop()
# model.pop()
# for i in xrange(len(model.layers)):
# model.layers[0].trainable = False
'''
model.add(Dropout(0.50))
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dropout(0.50))
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dropout(0.30))
'''
# model.add(Dense(outputLen))
if RP['classify']:
model.add(Activation(RP['classify_activation'], trainable = RP['trainable_inner']))
metrics = []
if RP['classify']:
metrics.append('accuracy')
model.compile(loss = RP['objective'], optimizer = OPTIMIZER, metrics = metrics)
print(' ...done')
return model
def configureEdgeModel(inputSmiles, inputFasta):
print(' Initializing edge model and compiling...')
smilesGRUInputShape = (None, inputSmiles.shape[2])
# smilesGRUSize = int(RP['gru_layer_multiplier'] * smilesGRUInputShape[1])
fastaGRUInputShape = (None, inputFasta.shape[2])
# fastaGRUSize = int(RP['fasta_gru_layer_multiplier'] * fastaGRUInputShape[1])
mergedOutputLen = len(RD['labels'])
smilesModel = Sequential()
smilesModel.add(TimeDistributed(Dense(300, activation = 'tanh', trainable = RP['trainable_inner']), input_shape = smilesGRUInputShape))
smilesModel.add(Dropout(0.30))
smilesModel.add(GRU(300, trainable = RP['trainable_inner'], return_sequences = True))
smilesModel.add(Activation('tanh', trainable = RP['trainable_inner']))
smilesModel.add(Dropout(0.30))
smilesModel.add(GRU(300, trainable = RP['trainable_inner']))
smilesModel.add(Activation('tanh', trainable = RP['trainable_inner']))
# utility.setModelConsumeLess(smilesModel, 'mem')
'''
smilesModel = utility.loadModel('24e62794bb6d5b5c562e41a3a2cccc3525fa625f', 'smiles_')
smilesModel.pop() # output
smilesModel.pop() # dropout
'''
# utility.setModelConsumeLess(smilesModel, 'gpu')
fastaModel = Sequential()
fastaModel.add(TimeDistributed(Dense(300, activation = 'tanh', trainable = RP['trainable_inner']), input_shape = fastaGRUInputShape))
fastaModel.add(Dropout(0.30))
fastaModel.add(GRU(300, trainable = RP['trainable_inner'], return_sequences = True))
fastaModel.add(Activation('tanh', trainable = RP['trainable_inner']))
fastaModel.add(Dropout(0.30))
fastaModel.add(GRU(300, trainable = RP['trainable_inner']))
fastaModel.add(Activation('tanh', trainable = RP['trainable_inner']))
# utility.setModelConsumeLess(fastaModel, 'mem')
'''
fastaModel = utility.loadModel('e6beb8b7e146b9ab46a71db8f3001bf62d96ff08', 'fasta_')
fastaModel.pop() # activation
fastaModel.pop() # output
fastaModel.pop() # dropout
'''
# utility.setModelConsumeLess(fastaModel, 'gpu')
merged = Merge([smilesModel, fastaModel], mode='concat')
mergedModel = Sequential()
mergedModel.add(merged)
mergedModel.add(Dense(300))
mergedModel.add(Activation('relu'))
mergedModel.add(Dropout(0.3))
mergedModel.add(Dense(300))
mergedModel.add(Activation('relu'))
mergedModel.add(Dropout(0.3))
mergedModel.add(Dense(mergedOutputLen))
if RP['classify']:
mergedModel.add(Activation(RP['classify_activation'], trainable = RP['trainable_inner']))
metrics = []
if RP['classify']:
metrics.append('accuracy')
mergedModel.compile(loss = RP['objective'], optimizer = OPTIMIZER, metrics = metrics)
print(' ...done')
return mergedModel
def learningRateDecayer(epoch):
if not RP['learning_rate_decay']:
return RP['learning_rate']
if RP['learning_rate_decay_type'] == 'step':
drop = np.floor((epoch)/RP['learning_rate_decay_step_config_steps'])
new_lr = float(RP['learning_rate'] * np.power(RP['learning_rate_decay_step_config_ratio'],drop))
print('lr',epoch,new_lr)
return new_lr
elif RP['learning_rate_decay_type'] == 'time':
raise NotImplementedError('learning rate decay: time')
elif RP['learning_rate_decay_type'] == 'peter':
raise NotImplementedError('learning rate decay: peter')
else:
raise RuntimeError('learning rate decay: unknown type {}'.format(RP['learning_rate_decay_type']))
def train(model, nnInput, labels, validation, makePlot = True,
labelIndexes = RP['label_idxs']):
print(' Training model...')
# needed format is orthogonal to ours
'''
formattedLabels = np.zeros((len(labels[0]), len(labelIndexes)))
formattedValid = np.zeros((len(validation[1][labelIndexes[0]]),
len(labelIndexes)))
for i in range(len(labelIndexes)):
for j in range(len(labels[0])):
formattedLabels[j][i] = labels[labelIndexes[i]][j]
for j in range(len(validation[1][labelIndexes[i]])):
formattedValid[j][i] = validation[1][labelIndexes[i]][j]
'''
early = keras.callbacks.EarlyStopping(monitor = 'val_loss',
patience = RP['early_stop'])
learningRateScheduler = keras.callbacks.LearningRateScheduler(learningRateDecayer)
modelLogger = visualization.ModelLogger()
history = model.fit(nnInput, labels, nb_epoch = RP['epochs'],
batch_size = RP['batch'], callbacks = [early],
validation_data = (validation[0], validation[1]))
if makePlot:
values = np.zeros((len(history.history['loss']), 2))
for i in range(len(history.history['loss'])):
values[i][0] = history.history['loss'][i]
values[i][1] = history.history['val_loss'][i]
utility.plotLoss(values)
visualization.histograms(modelLogger)
print(' Model weights:')
print(model.summary())
# print(model.get_weights())
print(' ...done')
return len(history.history['loss'])
def run(grid = None):
stats = {}
stats['runtime_second'] = time.time()
# initialize using the same seed (to get stable results on comparisons)
np.random.seed(RP['seed'])
# grab the commit at start
stats['git_commit'] = utility.getGitCommitHash()
# get the training and testing datasets along with some meta info
if RP['edge_prediction']:
trainIn, trainLabel, testIn, testLabel, preprocessMeta = data.preprocessEdgeData(db.getData())
else:
trainIn, trainLabel, testIn, testLabel, preprocessMeta = data.preprocessData(db.getData())
# trainIn, trainLabel, testIn, testLabel, preprocessMeta = data.preprocessFastaOneHotData(db.getData())
stats['training_row_count'] = len(trainLabel)
stats['testing_row_count'] = len(testLabel)
# load model from file or create and train one from scratch
if RP['load_model']:
model = utility.loadModel(RP['load_model'])
else:
if RP['edge_prediction']:
model = configureEdgeModel(trainIn[0],trainIn[1])
elif RP['discrete_label']:
model = configureModel(trainIn, len(trainLabel[0]))
else:
model = configureModel(trainIn)
stats['epoch_count'] = train(model, trainIn, trainLabel, (testIn, testLabel))
# persistence first
if cc.cfg['persistence']['model']:
name = '{}_rg_{}'.format(stats['git_commit'],':'.join([str(x) for x in RG['ratios']]))
# name = stats['git_commit']
stats['persistent_model_name'] = name
utility.saveModel(model, name)
# compute metrics for the model based on the task for both testing and training data
print('\nGetting metrics for training data:')
if RP['classify']:
if RP['discrete_label']:
trainMetrics = metrics.discreteClassify(model, trainIn, trainLabel, preprocessMeta)
else:
trainMetrics = metrics.classify(model, trainIn, trainLabel, preprocessMeta)
else:
trainMetrics = metrics.predict(model, trainIn, trainLabel, preprocessMeta)
print('\nGetting metrics for test data:')
if RP['classify']:
if RP['discrete_label']:
testMetrics = metrics.discreteClassify(model, testIn, testLabel, preprocessMeta)
else:
testMetrics = metrics.classify(model, testIn, testLabel, preprocessMeta)
else:
testMetrics = metrics.predict(model, testIn, testLabel, preprocessMeta)
# utilities and visualizations
if cc.cfg['plots']['layer_activations']:
visualization.layerActivations(model, testIn, testLabel)
if cc.cfg['plots']['seq_output']:
df = pd.DataFrame(cc.cfg['plots']['seq_output_seq_input'], columns=[RD['fasta'] if cc.cfg['plots']['seq_output_seq_input_name'] == 'fasta' else RD['smiles']])
visualization.visualizeSequentialOutput(model, cc.cfg['plots']['seq_output_layer_idx'], df)
if cc.cfg['plots']['print_pred']:
visualization.printPrediction(model, cc.cfg['plots']['print_pred_smiles'])
if cc.cfg['plots']['print_train_test_pred']:
visualization.printTrainTestPred(model, cc.cfg['plots']['print_train_test_pred_cnt'], trainIn, trainLabel, testIn, testLabel, preprocessMeta)
# statistics to send to journal
stats['runtime_second'] = time.time() - stats['runtime_second']
stats['memory_pm_mb'], stats['memory_vm_mb'] = utility.getMemoryUsage()
stats['comment'] = RP['comment']
stats['hostname'] = socket.gethostname()
stats['experiment_config'] = yaml.dump(cc.exp,default_flow_style=False)
stats['model'] = utility.modelToString(model)
stats['loaded_model'] = RP['load_model']
stats['parameter_count'] = model.count_params()
stats['task'] = 'classification' if RP['classify'] else 'regression'
stats['dataset_name'] = cc.exp['fetch']['table']
stats['split_name'] = RD['testing']
stats['label_name'] = ','.join(RD['labels'])
stats['epoch_max'] = RP['epochs']
stats['learning_rate'] = RP['learning_rate']
stats['optimization_method'] = OPTIMIZER.__class__.__name__
stats['batch_size'] = RP['batch']
stats['seed'] = RP['seed']
stats['objective'] = RP['objective']
stats['learning_curve'] = {'val':open('{}/{}'.format(cc.cfg['plots']['dir'], utility.PLOT_NAME),'rb').read(),'type':'bin'}
# metric statistics to send
metricStats = {}
if RP['classify']:
metricStats['relevance_training'] = trainMetrics['acc_avg']
metricStats['relevance_training_std'] = trainMetrics['acc_std']
metricStats['relevance_testing'] = testMetrics['acc_avg']
metricStats['relevance_testing_std'] = testMetrics['acc_std']
metricStats['log_loss'] = testMetrics['log_loss_avg']
metricStats['log_loss_std'] = testMetrics['log_loss_std']
metricStats['auc'] = testMetrics['auc_avg']
metricStats['auc_std'] = testMetrics['auc_std']
metricStats['auc_micro'] = testMetrics['auc_avg']
metricStats['auc_micro_std'] = testMetrics['auc_std']
else:
metricStats['relevance_training'] = trainMetrics['r2_avg']
metricStats['relevance_training_std'] = trainMetrics['r2_std']
metricStats['relevance_testing'] = testMetrics['r2_avg']
metricStats['relevance_testing_std'] = testMetrics['r2_std']
metricStats['mse'] = testMetrics['mse_avg']
metricStats['mse_std'] = testMetrics['mse_std']
metricStats['mae'] = testMetrics['mae_avg']
metricStats['mae_std'] = testMetrics['mae_std']
stats.update(metricStats)
db.sendStatistics(**stats)
utility.freeModel(model)
| bsd-3-clause |
rdhyee/working-open-data-2014 | notebooks/Day_19_B_HackFSM.py | 2 | 11257 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# HackFSM
#
# Relationship to other public APIs based on Solr?
#
# * http://www.hathitrust.org/htrc/solr-api
# * http://api.plos.org/solr/search-fields/
#
# <markdowncell>
# Documentation:
#
# http://digitalhumanities.berkeley.edu/hackfsm/api/detail
# <codecell>
from settings import (HACKFSM_ID, HACKFSM_KEY, HACKFSM_BASEURL)
from itertools import islice
import logging
import requests
import json
import urllib
import urlparse
from pandas import DataFrame, Series
import pandas as pd
import numpy as np
logging.basicConfig(filename='Experiment_20140325_HackFSM.log',level=logging.WARNING)
logger=logging.getLogger()
# <codecell>
def query(q, fl="id"):
url = "{base_url}?".format(base_url=HACKFSM_BASEURL) + \
urllib.urlencode({'q':q,
'fl':fl,
'wt':'json',
'app_id':HACKFSM_ID,
'app_key':HACKFSM_KEY})
r = requests.get(url)
return r.json()
# <codecell>
result = query(q="fsmTitle:Savio")['response']
result
# <headingcell level=1>
# Paging through results
# <codecell>
# try again
# http://stackoverflow.com/a/5724453/7782
# http://excess.org/article/2013/02/itergen1/
class my_g(object):
def __init__(self,max_count):
self._remaining = range(max_count)
self._len = max_count
def __iter__(self):
return self
def __len__(self):
return self._len
def next(self):
if not self._remaining:
raise StopIteration
return self._remaining.pop(0)
g=my_g(10)
print len(g)
list(g)
# <codecell>
class FSMException(Exception): pass
class FSM(object):
def __init__(self, q, fl="id", start=0, rows=30,
base_url=HACKFSM_BASEURL, app_id=HACKFSM_ID, app_key=HACKFSM_KEY):
self.q = q
self.fl = fl
self.start = start
self.rows = rows
self.base_url = base_url
self.app_id = app_id
self.app_key = app_key
# get first page and numfound
self.cursor = start
# get the first page
result = self._get_page(q, fl, self.cursor, self.rows)
self.numfound = result['response']['numFound']
def _check_status(self,result):
"""throw exception if non-zero status"""
if result['responseHeader']['status'] != 0:
raise FSMException("status: " + str(result['responseHeader']['status']))
def _get_page(self, q, fl, start, rows):
result = self._call_api(q, fl, start, rows)
# update current page
self.page = result['response']['docs']
self.page_len = len(self.page)
return result
def _call_api(self, q, fl, start, rows):
url = "{base_url}?".format(base_url=self.base_url) + \
urllib.urlencode({'q':q,
'fl':fl,
'wt':'json',
'start':start,
'row':rows,
'app_id':self.app_id,
'app_key':self.app_key})
result = requests.get(url).json()
self._check_status(result)
# check whether we're getting fewer records than expected
if len(result['response']['docs']) < rows:
# are we at the end of the results
if start + len(result['response']['docs']) != self.numfound:
logger.warning("url:{url}, numfound:{numfound}, start+len:{start_plus_len}".format(url=url,
numfound=self.numfound,
start_plus_len=start + len(result['response']['docs'])))
return result
def __iter__(self):
return self
def __len__(self):
return self.numfound
def next(self):
if not self.page:
# retrieve next page and check whether there's anything left
self.cursor += self.page_len
result = self._get_page(self.q, self.fl, self.cursor, self.rows)
if self.page_len == 0:
raise StopIteration
return self.page.pop(0)
# <codecell>
fsm = FSM("-fsmTeiUrl:[* TO *]", fl="id,fsmTitle,fsmImageUrl,fsmDateCreated")
# <codecell>
len(fsm)
# <codecell>
results = list(islice(fsm,None))
results[:10]
# <codecell>
df = DataFrame(results)
# <codecell>
len(df)
# <codecell>
df.fsmImageUrl
# <codecell>
from IPython.display import HTML
from jinja2 import Template
CSS = """
<style>
.wrap img {
margin-left: 0px;
margin-right: 0px;
display: inline-block;
width: 150px;
}
.wrap {
/* Prevent vertical gaps */
line-height: 0;
-webkit-column-count: 5;
-webkit-column-gap: 0px;
-moz-column-count: 5;
-moz-column-gap: 0px;
column-count: 5;
column-gap: 0px;
}
.wrap img {
/* Just in case there are inline attributes */
width: 100% !important;
height: auto !important;
}
</style>
"""
IMAGES_TEMPLATE = CSS + """
<div class="wrap">
{% for item in items %}<img title="{{item.fsmTitle.0}}" src="{{item.fsmImageUrl.0}}"/>{% endfor %}
</div>
"""
template = Template(IMAGES_TEMPLATE)
HTML(template.render(items=results[:10]))
# <markdowncell>
# # DISTINGUISHING IMAGES FROM DOCUMENTS
#
# To programmatically differentiate records that describe images from records that describe TEI-encoded XML documents, the API permits queries that exclude records with NULL values in the "unwanted" Url field.
#
# That is, to retrieve TEI documents only, one would query for null values in the `fsmImageUrl` field. To retrieve images only, one would query for null values in the `fsmTeiUrl` field.
#
# NOTE: Please observe the hyphen prepended to the field names in the examples below. The hyphen (minus sign) functions here as a NOT operator.
#
# Example that selects for TEI encoded XML documents by excluding null values of `fsmImageUrl`:
#
# https://<BASE URL>/solr/fsm/select?q=-fsmImageUrl:[* TO *]&wt=json&indent=true&app_id=abcdefgh&app_key=12345678901234567890123456789012
#
# Example that selects for images by excluding null values of fsmTeiUrl:
#
# https://<BASE URL>/solr/fsm/select?q=-fsmTeiUrl:[* TO *]&wt=json&indent=true&app_id=abcdefgh&app_key=12345678901234567890123456789012
# <codecell>
# TEI-encoded docs
len(FSM("-fsmImageUrl:[* TO *]"))
# <codecell>
# images
len(FSM("-fsmTeiUrl:[* TO *]", fl="id,fsmImageUrl"))
# <headingcell level=1>
# Studying the API parameters
# <codecell>
from lxml.html import parse, fromstring
from collections import OrderedDict
api_docs_url = "http://digitalhumanities.berkeley.edu/hackfsm/api/detail"
r = requests.get(api_docs_url).content
doc = fromstring(r)
# <codecell>
rows = doc.xpath('//div[@id="content"]/article/div/div/div/table[1]//tr')
headers = [col.text_content().strip() for col in rows[0].findall('td')]
headers
# <codecell>
fields = []
for row in rows[1:]:
field = [col.text_content().strip() for col in row.findall('td')]
fields.append(field)
fsmfields = OrderedDict(fields)
fsmfields.keys()
# <headingcell level=1>
# Study all the records
# <codecell>
fsm = FSM(q="*",fl=",".join(fsmfields.keys()))
# <codecell>
len(fsm)
# <codecell>
df = DataFrame(list(fsm))
# <codecell>
len(df)
# <codecell>
df.head()
# <codecell>
# TEI URIs
len(list(df[~df.fsmTeiUrl.isnull()].fsmTeiUrl.apply(lambda a: a[0])))
# <codecell>
# null dates
len(df[df.fsmDateCreated.isnull()])
# <codecell>
# non-null image URLs
len(df[~df.fsmImageUrl.isnull()])
# <codecell>
df[~df.fsmImageUrl.isnull()].id
# <codecell>
# distribution of number of image URLs
df[~df.fsmImageUrl.isnull()].fsmImageUrl.apply(len).value_counts()
# <codecell>
# let's crawl for images
results_images = list(FSM("-fsmTeiUrl:[* TO *]", fl=",".join(fsmfields.keys())))
# <codecell>
len(results_images)
# <codecell>
df_images=DataFrame(results_images)
# <codecell>
df_images[df_images.fsmImageUrl.isnull()]
# <codecell>
# would be interesting to see sizes of images and whether we can get at thumbnails
df_images.fsmImageUrl
# <markdowncell>
# http://content.cdlib.org/ark:/13030/tf1z09n5r1/thumbnail ->
# http://digitalassets.lib.berkeley.edu/fsm/ucb/images/brk00040569b_a.gif
#
# 
#
# http://content.cdlib.org/ark:/13030/tf1z09n5r1/hi-res.jpg ->
# http://digitalassets.lib.berkeley.edu/fsm/ucb/images/brk00040569b_c.jpg
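# <markdowncell>
# A small illustrative helper (hypothetical -- based only on the URL pattern shown
# above, not on a documented CDL endpoint): derive the thumbnail and hi-res
# variants from a content.cdlib.org ark base URL.
# <codecell>
def ark_variants(ark_url):
    """Return thumbnail and hi-res URL variants for a cdlib ark base URL."""
    base = ark_url.rstrip('/')
    return {'thumbnail': base + '/thumbnail', 'hi_res': base + '/hi-res.jpg'}
ark_variants("http://content.cdlib.org/ark:/13030/tf1z09n5r1")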
# <codecell>
urlparse.urlparse("http://digitalassets.lib.berkeley.edu/fsm/ucb/images/brk00040569b_c.jpg").netloc
# <codecell>
df_images.fsmImageUrl
# <codecell>
# calculate hostnames for all image urls
# might be possible to do this all with pandas
netlocs = list(df_images.fsmImageUrl.dropna().apply(lambda urls: set([urlparse.urlparse(url).netloc for url in urls])))
reduce(lambda x,y: x | y, netlocs, set())
# <codecell>
def len2(x):
try:
return len(x)
except:
return np.nan
df_images.fsmImageUrl.apply(len2) == 3
# <codecell>
df_images[df_images.fsmImageUrl.apply(len2) == 3].head()
# <markdowncell>
# 
# 
# 
# <codecell>
df_images[df_images.fsmImageUrl.apply(len2) == 4].ix[100].fsmImageUrl
# <codecell>
IMAGES_TEMPLATE = """
<div class="nowrap">
{% for item in items %}<img title="{{item}}" src="{{item}}"/>{% endfor %}
</div>
"""
template = Template(IMAGES_TEMPLATE)
HTML(template.render(items=df_images[df_images.fsmImageUrl.apply(len2) == 4].ix[100].fsmImageUrl ))
# <headingcell level=1>
# Dates
# <codecell>
len(df[~df.fsmDateCreated.isnull()])
# <codecell>
s = df[~df.fsmDateCreated.isnull()].fsmDateCreated.apply(len)==2 #.astype('datetime64[ns]')
# <codecell>
def first(x):
try:
return x[0]
except:
return np.nan
df['calc_date'] = pd.to_datetime(df.fsmDateCreated.apply(first), coerce=True)
# <codecell>
df[~df.calc_date.isnull()].sort_index(by='calc_date').calc_date
# <codecell>
pd.to_datetime(df.fsmDateCreated.dropna().apply(lambda s:s[0]).astype('str'), coerce=True).dropna()
# <codecell>
# http://stackoverflow.com/questions/17690738/in-pandas-how-do-i-convert-a-string-of-date-strings-to-datetime-objects-and-put
date_stngs = ('2008-12-20','2008-12-21','2008-12-22','2008-12-23','Nov. 9, 1964', 'junk')
pd.to_datetime(pd.Series(date_stngs),coerce=True)
# <headingcell level=1>
# Types of Resources
# <codecell>
def f(x):
try:
return set(x)
except:
return set()
reduce(lambda x,y: x | y, df.fsmTypeOfResource.apply(f), set())
# <codecell>
#related id
len(df.fsmRelatedIdentifier.dropna())
# <headingcell level=1>
# TEI documents
# <codecell>
df.fsmTeiUrl.dropna()
| apache-2.0 |
lcharleux/compmod-doc | doc/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
       A
      / \
     B   C
    / \ /
   E   D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
        Raises DotException for any of the many OS- and
        installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
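# Example usage (a sketch based on the registration above; the module path is
# a placeholder): enable this extension in a Sphinx conf.py and write, e.g.::
#
#     .. inheritance-diagram:: some.package.SomeClass
#        :parts: 1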
| gpl-2.0 |
jjhelmus/scipy | scipy/signal/signaltools.py | 2 | 115724 | # Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
import sys
import timeit
from . import sigtools, dlti
from ._upfirdn import upfirdn, _output_len
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
product, r_, ravel, real_if_close, reshape,
roots, sort, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
import math
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from .filter_design import cheby1, _validate_sos
from .fir_filter_design import firwin
if sys.version_info.major >= 3 and sys.version_info.minor >= 5:
from math import gcd
else:
from fractions import gcd
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
        if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
def _inputs_swap_needed(mode, shape1, shape2):
"""
If in 'valid' mode, returns whether or not the input arrays need to be
swapped depending on whether `shape1` is at least as large as `shape2` in
every dimension.
This is important for some of the correlation and convolution
implementations in this module, where the larger array input needs to come
before the smaller array input when operating in this mode.
Note that if the mode provided is not 'valid', False is immediately
returned.
"""
if mode == 'valid':
ok1, ok2 = True, True
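        # ok1: shape1 >= shape2 in every dimension; ok2: the reverse.
        # 'valid' mode requires at least one of the two to hold.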
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
ok1 = False
if not d2 >= d1:
ok2 = False
if not (ok1 or ok2):
raise ValueError("For 'valid' mode, one must be at least "
"as large as the other in every dimension")
return not ok1
return False
def correlate(in1, in2, mode='full', method='auto'):
r"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])
This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` then
.. math::
z[k] = (x * y)(k - N + 1)
= \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}
for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`
where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
and :math:`y_m` is 0 when m is outside the range of y.
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# this either calls fftconvolve or this function with method=='direct'
if method in ('fft', 'auto'):
return convolve(in1, _reverse_and_conj(in2), mode, method)
# fastpath to faster numpy.correlate for 1d inputs when possible
if _np_conv_ok(in1, in2, mode):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward if mode == 'full'. Also, it fails
# with 'valid' mode if in2 is larger than in1, so swap those, too.
# Don't swap inputs for 'same' mode, since shape of in1 matters.
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
_inputs_swap_needed(mode, in1.shape, in2.shape))
if swapped_inputs:
in1, in2 = in2, in1
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
if swapped_inputs:
# Reverse and conjugate to undo the effect of swapping inputs
z = _reverse_and_conj(z)
return z
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = asarray(newshape)
currshape = array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse.
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, complex) or
np.issubdtype(in2.dtype, complex))
shape = s1 + s2 - 1
# Check that input sizes are compatible with 'valid' mode
if _inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
in1, s1, in2, s2 = in2, s2, in1, s1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
sp1 = np.fft.rfftn(in1, fshape)
sp2 = np.fft.rfftn(in2, fshape)
ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy())
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp1 = fftpack.fftn(in1, fshape)
sp2 = fftpack.fftn(in2, fshape)
ret = fftpack.ifftn(sp1 * sp2)[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _numeric_arrays(arrays, kinds='buifc'):
"""
See if a list of arrays are all numeric.
Parameters
----------
ndarrays : array or list of arrays
arrays to check if numeric.
numeric_kinds : string-like
The dtypes of the arrays to be checked. If the dtype.kind of
the ndarrays are not in this string the function returns False and
otherwise returns True.
"""
if type(arrays) == ndarray:
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _prod(iterable):
"""
Product of a list of numbers.
Faster than np.prod for short lists like array shapes.
"""
product = 1
for x in iterable:
product *= x
return product
def _fftconv_faster(x, h, mode):
"""
See if using `fftconvolve` or `_correlateND` is faster. The boolean value
returned depends on the sizes and shapes of the input values.
The big O ratios were found to hold across different machines, which makes
sense as it's the ratio that matters (the effective speed of the computer
    is found in both big O constants). Regardless, these constants were tuned
    on an early 2015 MacBook Pro with 8GB RAM and an Intel i5 processor.
"""
if mode == 'full':
out_shape = [n + k - 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 10963.92823819 if x.ndim == 1 else 8899.1104874
elif mode == 'same':
out_shape = x.shape
if x.ndim == 1:
if h.size <= x.size:
big_O_constant = 7183.41306773
else:
big_O_constant = 856.78174111
else:
big_O_constant = 34519.21021589
elif mode == 'valid':
out_shape = [n - k + 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 41954.28006344 if x.ndim == 1 else 66453.24316434
else:
raise ValueError('mode is invalid')
# see whether the Fourier transform convolution method or the direct
# convolution method is faster (discussed in scikit-image PR #1792)
direct_time = (x.size * h.size * _prod(out_shape))
fft_time = sum(n * math.log(n) for n in (x.shape + h.shape +
tuple(out_shape)))
return big_O_constant * fft_time < direct_time
def _reverse_and_conj(x):
"""
Reverse array `x` in all dimensions and perform the complex conjugate
"""
reverse = [slice(None, None, -1)] * x.ndim
return x[reverse].conj()
def _np_conv_ok(volume, kernel, mode):
"""
See if numpy supports convolution of `volume` and `kernel` (i.e. both are
1D ndarrays and of the appropriate shape). Numpy's 'same' mode uses the
size of the larger input, while Scipy's uses the size of the first input.
"""
np_conv_ok = volume.ndim == kernel.ndim == 1
return np_conv_ok and (volume.size >= kernel.size or mode != 'same')
def _fftconvolve_valid(volume, kernel):
# fftconvolve doesn't support complex256
for not_fft_conv_supp in ["complex256", "complex192"]:
if hasattr(np, not_fft_conv_supp):
if volume.dtype == not_fft_conv_supp or kernel.dtype == not_fft_conv_supp:
return False
# for integer input,
    # catch when more precision required than float provides (representing an
    # integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return False
    if not _numeric_arrays([volume, kernel]):
        # fftconvolve only handles numeric (bool/int/float/complex) arrays
        return False
return True
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
def choose_conv_method(in1, in2, mode='full', measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`, but can also be used when performing many
convolutions of the same input shapes and dtypes, determining
which method to use for all of them, either to avoid the overhead of the
'auto' option or to use accurate real-world measurements.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
For large n, ``measure=False`` is accurate and can quickly determine the
fastest method to perform the convolution. However, this is not as
accurate for small n (when any dimension in the input or output is small).
In practice, we found that this function estimates the faster method up to
a multiplicative factor of 5 (i.e., the estimated method is *at most* 5
times slower than the fastest method). The estimation values were tuned on
an early 2015 MacBook Pro with 8GB RAM but we found that the prediction
held *fairly* accurately across different machines.
If ``measure=True``, time the convolutions. Because this function uses
`fftconvolve`, an error will be thrown if it does not support the inputs.
There are cases when `fftconvolve` supports the inputs but this function
returns `direct` (e.g., to protect against floating point integer
precision).
.. versionadded:: 0.19
Examples
--------
Estimate the fastest method for a given input:
>>> from scipy import signal
>>> a = np.random.randn(1000)
>>> b = np.random.randn(1000000)
>>> method = signal.choose_conv_method(a, b, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> c = np.random.randn(1000)
>>> d = np.random.randn(1000000)
>>> # `method` works with correlate and convolve
>>> corr1 = signal.correlate(a, b, mode='same', method=method)
>>> corr2 = signal.correlate(c, d, mode='same', method=method)
>>> conv1 = signal.convolve(a, b, mode='same', method=method)
>>> conv2 = signal.convolve(c, d, mode='same', method=method)
"""
volume = asarray(in1)
kernel = asarray(in2)
if measure:
times = {}
for method in ['fft', 'direct']:
times[method] = _timeit_fast(lambda: convolve(volume, kernel,
mode=mode, method=method))
chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
return chosen_method, times
# fftconvolve doesn't support complex256
fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
if hasattr(np, fftconv_unsup):
if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
return 'direct'
# for integer input,
# catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return 'direct'
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return 'fft'
return 'direct'
def convolve(in1, in2, mode='full', method='auto'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
choose_conv_method : chooses the fastest appropriate convolution method
fftconvolve
Notes
-----
By default, `convolve` and `correlate` use ``method='auto'``, which calls
`choose_conv_method` to choose the fastest method using pre-computed
values (`choose_conv_method` can also measure real-world timing with a
keyword argument). Because `fftconvolve` relies on floating point numbers,
there are certain constraints that may force `method=direct` (more detail
in `choose_conv_method` docstring).
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
if _inputs_swap_needed(mode, volume.shape, kernel.shape):
# Convolution is commutative; order doesn't have any effect on output
volume, kernel = kernel, volume
if method == 'auto':
method = choose_conv_method(volume, kernel, mode=mode)
if method == 'fft':
out = fftconvolve(volume, kernel, mode=mode)
if volume.dtype.kind in 'ui':
out = np.around(out)
return out.astype(volume.dtype)
# fastpath to faster numpy.convolve for 1d inputs when possible
if _np_conv_ok(volume, kernel, mode):
return np.convolve(volume, kernel, mode)
return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
    Perform an order filter on the input array. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * volume.ndim
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(volume.ndim):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
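    # The median is the middle element (rank numels // 2) of the sorted
    # window, so an order filter with that rank implements the median filter.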
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * im.ndim
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') /
product(mysize, axis=0) - lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
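    # Where the local variance falls below the noise estimate the Wiener gain
    # would go negative, so fall back to the local mean there.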
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('convolve2d inputs must both be 2D arrays')
if _inputs_swap_needed(mode, in1.shape, in2.shape):
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('correlate2d inputs must both be 2D arrays')
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
if swapped_inputs:
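        # Undo the effect of swapping the inputs above by reversing the
        # output in both dimensions.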
out = out[::-1, ::-1]
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
- a[1]*y[n-1] - ... - a[N]*y[n-N]
where `M` is the degree of the numerator, `N` is the degree of the
denominator, and `n` is the sample number. It is implemented using
the following difference equations (assuming M = N)::
a[0]*y[n] = b[0] * x[n] + d[0][n-1]
d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
...
d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
d[N-1][n] = b[N] * x[n] - a[N] * y[n]
where `d` are the state variables.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -M
b[0] + b[1]z + ... + b[M] z
Y(z) = -------------------------------- X(z)
-1 -N
a[0] + a[1]z + ... + a[N] z
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
... 0.1*np.sin(2*np.pi*1.25*t + 1) +
... 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + np.random.randn(len(t)) * 0.08
Create an order 3 lowpass butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
    Apply the filter again, so that the result is filtered at the same
    effective order as filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
>>> plt.figure
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of
# singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
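        # With len(a) == 1 the filter is purely FIR, so it reduces to a
        # convolution with the normalized taps b along the requested axis;
        # zi, if given, is added onto the leading samples of the full
        # convolution.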
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
where ``K = max(M, N)``.
See Also
--------
lfilter, lfilter_zi
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
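    # Direct form II transposed initial state:
    #   zi[m] = sum(b[m+1:] * x[:M-m]) - sum(a[m+1:] * y[:N-m])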
for m in range(M):
zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See Also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = zeros(N - D + 1, float)
input[0] = 1
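        # Filtering a unit impulse through num/den performs the polynomial
        # long division; the remainder is whatever convolving the quotient
        # back with the divisor fails to reproduce.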
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
See Also
--------
scipy.fftpack.hilbert : Return Hilbert transform of a periodic sequence x.
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
    --------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
    The amplitude envelope is given by the magnitude of the analytic signal.
    The instantaneous frequency can be obtained by differentiating the
    instantaneous phase with respect to time. The instantaneous phase
    corresponds to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = (np.diff(instantaneous_phase) /
... (2.0*np.pi) * fs)
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
Processing, Third Edition, 2009. Chapter 12.
ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fftpack.fft(x, N, axis=axis)
h = zeros(N)
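    # Build the spectral multiplier: keep DC (and Nyquist for even N), double
    # the positive frequencies, and zero out the negative frequencies.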
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if x.ndim > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = fftpack.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fftpack.fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
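    # Build the per-axis analytic-signal multipliers; their outer product
    # below forms the 2-D mask applied to the FFT.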
for p in range(2):
h = eval("h%d" % (p + 1))
N1 = N[p]
if N1 % 2 == 0:
h[0] = h[N1 // 2] = 1
h[1:N1 // 2] = 2
else:
h[0] = 1
h[1:(N1 + 1) // 2] = 2
exec("h%d = h" % (p + 1), globals(), locals())
h = h1[:, newaxis] * h2[newaxis, :]
k = x.ndim
while k > 2:
h = h[:, newaxis]
k -= 1
x = fftpack.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
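    # Walk the magnitude-sorted roots, merging any run of roots that stays
    # within `tol` of the current group's representative (comproot).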
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `invresz`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residue, invresz, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
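    # Rebuild the numerator: each residue contributes r[indx] times the
    # product of the remaining pole factors (with the shared pole's
    # multiplicity reduced according to the residue's order).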
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `residuez`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, residuez, numpy.poly, unique_roots
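Examples
--------
A minimal sketch with illustrative values: expand
``H(s) = 1 / (s**2 + 3*s + 2)``, which has simple poles at -1 and -2:
>>> from scipy import signal
>>> r, p, k = signal.residue([1], [1, 3, 2])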
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n]) /
factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `residue`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invresz, residue, unique_roots
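Examples
--------
A minimal sketch with illustrative values: expand
``H(z) = 1 / (1 - 0.9*z**(-1))``, which has a single pole at ``z = 0.9``:
>>> from scipy import signal
>>> r, p, k = signal.residuez([1.0], [1.0, -0.9])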
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `invres`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residuez, unique_roots, invres
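Examples
--------
A minimal sketch with illustrative values: rebuild
``H(z) = 1 / (1 - 0.9*z**(-1))`` from a single residue/pole pair and an
empty direct term:
>>> from scipy import signal
>>> b, a = signal.invresz([1.0], [0.9], [])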
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fftpack.fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftpack.fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = fftpack.ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
W.shape = (Nx,)
sl = [slice(None)] * x.ndim
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string, tuple, or array_like, optional
Desired window to use to design the low-pass filter, or the FIR filter
coefficients to employ. See below for details.
Returns
-------
resampled_x : array
The resampled array.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample : Resample up or down using the FFT method.
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The argument `window` specifies the FIR low-pass filter design.
If `window` is an array_like it is assumed to be the FIR filter
coefficients. Note that the FIR filter is applied after the upsampling
step, so it should be designed to operate on a signal at a sampling
frequency higher than the original by a factor of `up//gcd(up, down)`.
This function's output will be centered with respect to this array, so it
is best to pass a symmetric filter with an odd number of samples if, as
is usually the case, a zero-phase filter is desired.
For any other type of `window`, the functions `scipy.signal.get_window`
and `scipy.signal.firwin` are called to generate the appropriate filter
coefficients.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * down / float(up)``.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
"""
x = asarray(x)
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
# Determine our up and down factors
# Use a rational approximation to save computation time on really long
# signals
g_ = gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_out = x.shape[axis] * up
n_out = n_out // down + bool(n_out % down)
if isinstance(window, (list, np.ndarray)):
window = asarray(window)
if window.ndim > 1:
raise ValueError('window must be 1-D')
half_len = (window.size - 1) // 2
h = window
else:
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
h = firwin(2 * half_len + 1, f_c, window=window)
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad), h, np.zeros(n_post_pad)))
n_pre_remove_end = n_pre_remove + n_out
# filter then remove excess
y = upfirdn(h, x, up, down, axis=axis)
keep = [slice(None), ]*x.ndim
keep[axis] = slice(n_pre_remove, n_pre_remove_end)
return y[keep]
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
:doi:`10.1063/1.3670512`.
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
:doi:`10.1007/s00422-013-0560-8`.
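Examples
--------
A minimal sketch with illustrative values: events occurring exactly once
per period are perfectly synchronized, so the returned strength is
(numerically) 1 and the phase is 0:
>>> from scipy import signal
>>> events = [0.0, 1.0, 2.0, 3.0]
>>> strength, phase = signal.vectorstrength(events, 1.0)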
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
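A piecewise-linear trend can be removed by supplying break points; this
sketch reuses the noisy data from above (exact residuals not shown):
>>> x2 = np.r_[np.linspace(0, 1, 500), np.linspace(1, 0, 500)] + noise
>>> detrended = signal.detrend(x2, bp=[500])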
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
if axis != -1 or axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
if axis != -1 or axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward and once
backwards. The combined filter has linear phase.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
.. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# method == "pad"
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=max(len(a), len(b)))
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = signal.unit_impulse(700)
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
sos, n_sections = _validate_sos(sos)
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r, got %r.' %
(axis, x.shape, n_sections, x_zi_shape, zi.shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward filter using cascaded second-order sections.
See `filtfilt` for more complete information about this method.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is::
3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
(sos[:, 5] == 0).sum()))
The extra subtraction at the end attempts to compensate for poles
and zeros at the origin (e.g. for odd-order filters) to yield
equivalent estimates of `padlen` to those of `filtfilt` for
second-order section filters built with `scipy.signal` functions.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
filtfilt, sosfilt, sosfilt_zi, sosfreqz
Notes
-----
.. versionadded:: 0.18.0
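Examples
--------
A minimal sketch with illustrative values: zero-phase low-pass filter a
noisy signal using a Butterworth design in second-order-section form:
>>> from scipy import signal
>>> np.random.seed(0)
>>> x = np.random.randn(201)
>>> sos = signal.butter(4, 0.125, output='sos')
>>> y = signal.sosfiltfilt(sos, x)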
"""
sos, n_sections = _validate_sos(sos)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
"""
Downsample the signal after applying an anti-aliasing filter.
By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor. For downsampling factors higher than 13, it is
recommended to call `decimate` multiple times.
n : int, optional
The order of the filter (1 less than the length for 'fir'). Defaults to
8 for 'iir' and 30 for 'fir'.
ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
If 'iir' or 'fir', specifies the type of lowpass filter. If an instance
of an `dlti` object, uses that object to filter before downsampling.
axis : int, optional
The axis along which to decimate.
zero_phase : bool, optional
Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
when using an IIR filter, and shifting the outputs back by the filter's
group delay when using an FIR filter. The default value of ``True`` is
recommended, since a phase shift is generally not desired.
.. versionadded:: 0.18.0
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample : Resample up or down using the FFT method.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The ``zero_phase`` keyword was added in 0.18.0.
The possibility to use instances of ``dlti`` as ``ftype`` was added in
0.18.0.
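Examples
--------
A minimal sketch with illustrative values: reduce the sampling rate of a
slow sine wave by a factor of 4 using the default zero-phase IIR filter:
>>> from scipy import signal
>>> t = np.linspace(0, 1, 1000, endpoint=False)
>>> x = np.sin(2 * np.pi * 5 * t)
>>> y = signal.decimate(x, 4)
>>> y.shape
(250,)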
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is not None and not isinstance(n, int):
raise TypeError("n must be an integer")
if ftype == 'fir':
if n is None:
n = 30
system = dlti(firwin(n+1, 1. / q, window='hamming'), 1.)
elif ftype == 'iir':
if n is None:
n = 8
system = dlti(*cheby1(n, 0.05, 0.8 / q))
elif isinstance(ftype, dlti):
system = ftype._as_tf() # Avoids copying if already in TF form
n = np.max((system.num.size, system.den.size)) - 1
else:
raise ValueError('invalid ftype')
sl = [slice(None)] * x.ndim
if len(system.den) == 1: # FIR case
if zero_phase:
y = resample_poly(x, 1, q, axis=axis, window=system.num)
else:
# upfirdn is generally faster than lfilter by a factor equal to the
# downsampling factor, since it only calculates the needed outputs
n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
y = upfirdn(system.num, x, up=1, down=q, axis=axis)
sl[axis] = slice(None, n_out, None)
else: # IIR case
if zero_phase:
y = filtfilt(system.num, system.den, x, axis=axis)
else:
y = lfilter(system.num, system.den, x, axis=axis)
sl[axis] = slice(None, None, q)
return y[sl]
| bsd-3-clause |
appapantula/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
xavierwu/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
| bsd-3-clause |
RTHMaK/RPGOne | Documents/skflow-master/skflow/tests/test_data_feeder.py | 2 | 4801 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from struct import Struct
import numpy as np
import six
import tensorflow as tf
from skflow import data_feeder
from skflow.io.data_feeder import setup_predict_data_feeder
from skflow.io import *
class MockPlaceholder(object):
def __init__(self, name):
self.name = name
class DataFeederTest(tf.test.TestCase):
def test_data_feeder_regression(self):
X = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
df = data_feeder.DataFeeder(X, y, n_classes=0, batch_size=3)
feed_dict_fn = df.get_feed_dict_fn(
MockPlaceholder(name='input'),
MockPlaceholder(name='output'))
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict['input'], [[3, 4], [1, 2]])
self.assertAllClose(feed_dict['output'], [2, 1])
def test_data_feeder_multioutput_regression(self):
X = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
df = data_feeder.DataFeeder(X, y, n_classes=0, batch_size=2)
feed_dict_fn = df.get_feed_dict_fn(
MockPlaceholder(name='input'),
MockPlaceholder(name='output'))
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict['input'], [[3, 4], [1, 2]])
self.assertAllClose(feed_dict['output'], [[3, 4], [1, 2]])
def test_data_feeder_multioutput_classification(self):
X = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
df = data_feeder.DataFeeder(X, y, n_classes=5, batch_size=2)
feed_dict_fn = df.get_feed_dict_fn(
MockPlaceholder(name='input'),
MockPlaceholder(name='output'))
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict['input'], [[3, 4], [1, 2]])
self.assertAllClose(feed_dict['output'], [[[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0]]])
def test_streaming_data_feeder(self):
def X_iter():
yield np.array([1, 2])
yield np.array([3, 4])
def y_iter():
yield np.array([1])
yield np.array([2])
df = data_feeder.StreamingDataFeeder(X_iter(), y_iter(), n_classes=0,
batch_size=2)
feed_dict_fn = df.get_feed_dict_fn(
MockPlaceholder(name='input'),
MockPlaceholder(name='output'))
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict['input'], [[1, 2], [3, 4]])
self.assertAllClose(feed_dict['output'], [1, 2])
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
X = pd.DataFrame(dict(a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
X = dd.from_pandas(X, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# X = extract_dask_data(X)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(X, y, n_classes=2, batch_size=2)
feed_dict_fn = df.get_feed_dict_fn(
MockPlaceholder(name='input'),
MockPlaceholder(name='output'))
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict['input'], [[ 0.40000001, 0.1],
[ 0.60000002, 0.2]])
self.assertAllClose(feed_dict['output'], [[ 0., 0., 1.],
[ 0., 1., 0.]])
class SetupPredictDataFeederTest(tf.test.TestCase):
def test_iterable_data(self):
X = iter([[1, 2], [3, 4], [5, 6]])
df = setup_predict_data_feeder(X, batch_size=2)
self.assertAllClose(six.next(df), [[1, 2], [3, 4]])
self.assertAllClose(six.next(df), [[5, 6]])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
altairpearl/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the positions of its points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
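# With this connectivity matrix only neighboring samples can be merged, so the
# agglomeration follows the manifold instead of cutting across its folds.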
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
xavierwu/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 98 | 20870 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
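            # ocur counts how many covariances are tied (within eps) with the
            # maximum; Lars should activate exactly one new variable per step.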
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with rank < n_features) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
    # Note: it used to be the case that Lars had to use the "drop for good"
    # strategy for this, but that is no longer the case with the
    # equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
    # now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
    # option. However, for the middle part (the comparison of coefficient
    # values for a range of alphas) we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
    # the least-squares solution for small alphas, see 'Least Angle Regression'
    # by Efron et al. (2004). The coefficients typically agree up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
ch3ll0v3k/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and randomly extract 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial_fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
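            # Center and scale the pooled patches (zero mean, unit variance
            # per pixel) before the incremental K-means update.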
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/io/gbq.py | 1 | 7998 | """ Google BigQuery support """
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from pandas.compat._optional import import_optional_dependency
if TYPE_CHECKING:
from pandas import DataFrame
def _try_import():
# since pandas is a dependency of pandas-gbq
# we need to import on first use
msg = (
"pandas-gbq is required to load data from Google BigQuery. "
"See the docs: https://pandas-gbq.readthedocs.io."
)
pandas_gbq = import_optional_dependency("pandas_gbq", extra=msg)
return pandas_gbq
def read_gbq(
query: str,
project_id: Optional[str] = None,
index_col: Optional[str] = None,
col_order: Optional[List[str]] = None,
reauth: bool = False,
auth_local_webserver: bool = False,
dialect: Optional[str] = None,
location: Optional[str] = None,
configuration: Optional[Dict[str, Any]] = None,
credentials=None,
use_bqstorage_api: Optional[bool] = None,
private_key=None,
verbose=None,
progress_bar_type: Optional[str] = None,
) -> "DataFrame":
"""
Load data from Google BigQuery.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
query : str
SQL-Like Query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
dialect : str, default 'legacy'
Note: The default value is changing to 'standard' in a future version.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
.. versionchanged:: 0.24.0
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
*New in version 0.5.0 of pandas-gbq*.
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
This feature requires version 0.10.0 or later of the ``pandas-gbq``
package. It also requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.25.0
progress_bar_type : Optional, str
If set, use the `tqdm <https://tqdm.github.io/>`__ library to
display a progress bar while the data downloads. Install the
``tqdm`` package to use this feature.
Possible values of ``progress_bar_type`` include:
``None``
No progress bar.
``'tqdm'``
Use the :func:`tqdm.tqdm` function to print a progress bar
to :data:`sys.stderr`.
``'tqdm_notebook'``
Use the :func:`tqdm.tqdm_notebook` function to display a
progress bar as a Jupyter notebook widget.
``'tqdm_gui'``
Use the :func:`tqdm.tqdm_gui` function to display a
progress bar as a graphical dialog box.
        Note that this feature requires version 0.12.0 or later of the
        ``pandas-gbq`` package, as well as the ``tqdm`` package. Slightly
        different from ``pandas-gbq``, here the default is ``None``.
.. versionadded:: 1.0.0
Returns
-------
df: DataFrame
DataFrame representing results of query.
See Also
--------
pandas_gbq.read_gbq : This function in the pandas-gbq library.
DataFrame.to_gbq : Write a DataFrame to Google BigQuery.
"""
pandas_gbq = _try_import()
kwargs: Dict[str, Union[str, bool, None]] = {}
# START: new kwargs. Don't populate unless explicitly set.
if use_bqstorage_api is not None:
kwargs["use_bqstorage_api"] = use_bqstorage_api
kwargs["progress_bar_type"] = progress_bar_type
# END: new kwargs
return pandas_gbq.read_gbq(
query,
project_id=project_id,
index_col=index_col,
col_order=col_order,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
dialect=dialect,
location=location,
configuration=configuration,
credentials=credentials,
**kwargs,
)
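# A minimal usage sketch for ``read_gbq`` (illustrative only: the query, the
# dataset/table and the project ID below are assumptions, not values used
# anywhere in this module):
#
#     df = read_gbq(
#         "SELECT name FROM `my_dataset.my_table` LIMIT 10",
#         project_id="my-project",
#         dialect="standard",
#     )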
def to_gbq(
dataframe: "DataFrame",
destination_table: str,
project_id: Optional[str] = None,
chunksize: Optional[int] = None,
reauth: bool = False,
if_exists: str = "fail",
auth_local_webserver: bool = False,
table_schema: Optional[List[Dict[str, str]]] = None,
location: Optional[str] = None,
progress_bar: bool = True,
credentials=None,
verbose=None,
private_key=None,
) -> None:
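    """Write a DataFrame to a Google BigQuery table (thin wrapper around
    ``pandas_gbq.to_gbq``)."""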
pandas_gbq = _try_import()
pandas_gbq.to_gbq(
dataframe,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
verbose=verbose,
private_key=private_key,
)
| bsd-3-clause |
laijingtao/landlab | landlab/ca/examples/rock_weathering.py | 6 | 5506 | #!/usr/env/python
"""
rock_weathering.py
CellLab-CTS model that simulates the weathering of rock to saprolite around
a network of fractures.
Created (and translated from earlier code) by Greg Tucker, Jul 2015
"""
from __future__ import print_function
import time
import numpy as np
from landlab import RasterModelGrid
from landlab.ca.celllab_cts import Transition, CAPlotter
from landlab.ca.raster_cts import RasterCTS
from landlab.components.fracture_grid.fracture_grid import make_frac_grid
import matplotlib
from landlab.io.netcdf import write_netcdf
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent the
grain-by-grain transformation of bedrock to saprolite.
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
Weathering here is treated very simply: a bedrock particle adjacent to a
saprolite particle has a specified probability (rate) of weathering to
saprolite; in other words, a rock-saprolite pair can turn into a
saprolite-saprolite pair.
The states and transitions are as follows:
    Pair state      Transition to       Process     Rate (cells/s)
    ==========      =============       =======     ==============
    0 (0-0)         1 (0-1)                         0.5
                    2 (1-0)                         0.5
    1 (0-1)         3 (1-1)                         1.0
    2 (1-0)         3 (1-1)                         1.0
    3 (1-1)         (none)                          -
"""
# Create an empty transition list
xn_list = []
# Append two transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left/bottom cell, right/top cell, orientation)
# - Tuple representing new pair state
# (left/bottom cell, right/top cell, orientation)
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
xn_list.append( Transition((0,1,0), (1,1,0), 1., 'weathering') )
xn_list.append( Transition((1,0,0), (1,1,0), 1., 'weathering') )
return xn_list
def main():
# INITIALIZE
# User-defined parameters
nr = 200 # number of rows in grid
nc = 200 # number of columns in grid
plot_interval = 0.05 # time interval for plotting (unscaled)
run_duration = 5.0 # duration of run (unscaled)
report_interval = 10.0 # report interval, in real-time seconds
frac_spacing = 10 # average fracture spacing, nodes
outfilename = 'wx' # name for netCDF files
# Remember the clock time, and calculate when we next want to report
# progress.
current_real_time = time.time()
next_report = current_real_time + report_interval
# Counter for output files
time_slice = 0
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'rock', 1 : 'saprolite' }
xn_list = setup_transition_list()
# Create the node-state array and attach it to the grid.
# (Note use of numpy's uint8 data type. This saves memory AND allows us
# to write output to a netCDF3 file; netCDF3 does not handle the default
# 64-bit integer type)
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=np.uint8)
node_state_grid[:] = make_frac_grid(frac_spacing, model_grid=mg)
# Create the CA model
ca = RasterCTS(mg, ns_dict, xn_list, node_state_grid)
# Set up the color map
rock_color = (0.8, 0.8, 0.8)
sap_color = (0.4, 0.2, 0)
clist = [rock_color, sap_color]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
ca_plotter = CAPlotter(ca, cmap=my_cmap)
# Plot the initial grid
ca_plotter.update_plot()
# Output the initial grid to file
write_netcdf((outfilename+str(time_slice)+'.nc'), mg,
#format='NETCDF3_64BIT',
names='node_state_map')
# RUN
current_time = 0.0
while current_time < run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= next_report:
print('Current sim time', current_time, '(',
100 * current_time/run_duration, '%)')
next_report = current_real_time + report_interval
# Run the model forward in time until the next output step
ca.run(current_time+plot_interval, ca.node_state,
plot_each_transition=False)
current_time += plot_interval
# Plot the current grid
ca_plotter.update_plot()
# Output the current grid to a netCDF file
time_slice += 1
write_netcdf((outfilename+str(time_slice)+'.nc'), mg,
#format='NETCDF3_64BIT',
names='node_state_map')
# FINALIZE
# Plot
ca_plotter.finalize()
# If user runs this file, activate the main() function
if __name__ == "__main__":
main()
| mit |
thilbern/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
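# Only two samples carry a label: the first point (on the outer circle) and
# the last point (on the inner circle); every other sample is marked -1,
# i.e. unlabeled.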
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
ElDeveloper/qiime | qiime/plot_rank_abundance_graph.py | 15 | 4175 | #!/usr/bin/env python
# File created on 17 Aug 2010
from __future__ import division
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jens Reeder", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "[email protected]"
from matplotlib import use
use('Agg', warn=False)
from numpy import arange, array, sort
from itertools import cycle
from matplotlib.pyplot import plot, gca, ylim, xlim, show, legend, \
savefig
from os.path import join
from qiime.colors import data_color_order, data_colors
from biom.table import UnknownIDError
def make_sorted_frequencies(counts, absolute=False):
"""transform and sort a vector of count.
counts: a column of an OTU table
absolute: if True return absolute values instead of frequencies.
"""
c = sort(counts)
c = c[c.nonzero()]
c = c[::-1]
if absolute:
return c
else:
f = c / float(c.sum())
return f
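# Worked example for make_sorted_frequencies (illustrative): a count vector
# [0, 3, 1, 6] yields the sorted non-zero counts [6, 3, 1]; with
# absolute=False these are normalised to frequencies [0.6, 0.3, 0.1].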
def plot_rank_abundance_graph(
otu_count_vector, color='red', absolute=False, label=None):
"""Plots rank-abundance curve.
otu_count_vector: a vector of otu counts for a single sample
color: color of the series to plot
absolute: if True plot absolute counts instead of freqs
label: text for the legend of this series
"""
f = make_sorted_frequencies(otu_count_vector, absolute)
x = arange(1, len(f) + 1)
plot(x, f, color=color, alpha=0.8, label=label)
ax = gca()
return ax
def plot_rank_abundance_graphs(result_fp, sample_names, otu_table,
file_type='pdf',
absolute_counts=False,
x_linear_scale=False,
y_linear_scale=False,
no_legend=False,
log_fh=None):
"""plot rank-abundance curves for sample specified in sample_name.
result_fp: filename of output figure
sample_names: comma separated string of sample names
    otu_table: BIOM Table object holding the OTU counts per sample
file_type: valid matplotlib file type
x_linear_scale: if True draw x axis in linear scale, otherwise use log
y_linear_scale: if True draw y axis in linear scale, otherwise use log
no_legend: if True don't draw legend
log_fh: open file handle to log file, if not None used to log
"""
# figure out which samples to draw
if sample_names == '*':
user_sample_names = otu_table.ids()
else:
user_sample_names = sample_names.split(',')
if len(user_sample_names) < 1:
raise ValueError("sample IDs must be comma separated list of "
+ "sample names - found %s" % sample_names)
# do the actual drawing
ax = None
for sample_name, color in zip(user_sample_names, cycle(data_color_order)):
color = data_colors[color].toHex()
try:
otu_count_vector = otu_table.data(sample_name, 'sample')
except UnknownIDError:
if log_fh:
log_fh.write(
"UnknownIDError: Sample name %s not in OTU table - skipping." %
sample_name)
continue
ax = plot_rank_abundance_graph(otu_count_vector,
color=color,
absolute=absolute_counts,
label=sample_name)
ax.set_label(sample_name)
if ax is None:
# ax should be defined if at least one series has been drawn
raise ValueError(
"No data series drawn. Check your OTU table and sample names")
# settings for all series
ax.grid()
ax.set_xlabel('Species rank')
if absolute_counts:
ax.set_ylabel('Absolute abundance')
else:
ax.set_ylabel('Relative abundance')
if not x_linear_scale:
ax.set_xscale('log')
if not y_linear_scale:
ax.set_yscale('log')
if not no_legend:
legend()
if not result_fp.endswith(file_type):
result_fp += '.' + file_type
savefig(result_fp)
| gpl-2.0 |
Totoketchup/das | experiments/analysis/analyzing_adapt.py | 1 | 1839 | import tensorflow as tf
import os
import config
from utils.postprocessing.representation import *
import matplotlib.pyplot as plt
import numpy as np
import argparse
# from models import Adapt
## Here we plot the windows and bases of the Adapt model
# /home/anthony/das/log/pretraining/
# AdaptiveNet-noisy-breeze-3898-N=256--
# alpha=0.01--batch_size=16--beta=0.05--
# chunk_size=20480--maxpool=256--optimizer=Adam--
# reg=0.001--rho=0.01--same_filter=True--
# smooth_size=10--type=pretraining--window=1024-/
####
#### MODEL CONFIG
####
def main(args):
sess = tf.Session()
checkpoint = tf.train.latest_checkpoint(args.path)
importer = tf.train.import_meta_graph(checkpoint+'.meta')
importer.restore(sess, checkpoint)
graph = tf.get_default_graph()
front_window = graph.get_tensor_by_name('front/window/w:0')
front_bases = graph.get_tensor_by_name('front/bases/bases:0')
back_window = graph.get_tensor_by_name('back/window/value:0')
back_bases = graph.get_tensor_by_name('back/bases/value:0')
with sess.as_default():
front_window = front_window.eval()
front_bases = front_bases.eval()
front_bases = np.transpose(front_bases)
back_window = back_window.eval()
back_bases = back_bases.eval()
back_bases = np.transpose(back_bases)
win_size = front_window.shape[0]
filters_nb, _ = front_bases.shape
sub = filters_nb // 16
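        # Plot the learned filters 16 at a time: each 4x4 figure shows the
        # window-weighted bases of the front end.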
for j in range(sub):
fig, plots = plt.subplots(4, 4, figsize=(18, 16))
for x in range(4):
for y in range(4):
plots[x, y].plot(front_window*front_bases[j*16+(4*y+x)])
plots[x, y].axis([0,win_size,-0.005,0.005])
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Argument Parser")
parser.add_argument(
'--path', help='Path to Adapt model', required=True)
main(parser.parse_args())
| mit |
anirudhjayaraman/scikit-learn | sklearn/discriminant_analysis.py | 32 | 27308 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
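        # shrunk_covariance blends the empirical covariance S with a scaled
        # identity: (1 - shrinkage) * S + shrinkage * (trace(S) / n_features) * I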
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
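        # Closed-form Gaussian discriminant with a shared covariance Sigma:
        # coef_k = Sigma^{-1} mu_k (solved below via least squares) and
        # intercept_k = -0.5 * mu_k^T Sigma^{-1} mu_k + log(prior_k).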
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
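        # The next four in-place operations compute the logistic sigmoid of
        # the decision values: prob = 1 / (1 + exp(-decision)).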
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
    priors : array, optional, shape = [n_classes]
        Priors on classes.
    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
    store_covariances : boolean, optional, default False
        If True the covariance matrices are computed and stored in the
        `self.covariances_` attribute.
    tol : float, optional, default 1.0e-4
        Threshold used for rank estimation.
    Attributes
    ----------
    covariances_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.
    means_ : array-like, shape = [n_classes, n_features]
        Class means.
    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).
    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        ``n_k = min(n_features, number of elements in class k)``.
        It is the rotation of the Gaussian distribution, i.e. its
        principal axes.
    scalings_ : list of arrays
        For each class k an array of shape [n_k]. It contains the scaling
        of the Gaussian distribution along its principal axes, i.e. the
        variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
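    The posterior class probabilities for the same query point can be
    inspected with ``predict_proba`` (one row per sample, one column per
    class):
    >>> clf.predict_proba([[-0.8, -1]]).shape
    (1, 2)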
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
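            # Shrink the spectrum toward the identity,
            # (1 - reg_param) * Sigma_k + reg_param * I, applied here to the
            # eigenvalues in S2 (see the ``reg_param`` docstring above).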
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
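        # For every class k the discriminant below is the Gaussian
        # log-posterior up to a constant shared by all classes:
        #     -0.5 * (Mahalanobis(x, mean_k) + log|Sigma_k|) + log(prior_k),
        # with the Mahalanobis term evaluated in the rotated/whitened
        # coordinates stored in rotations_ and scalings_.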
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
microsoft/EconML | econml/tests/test_inference.py | 1 | 22822 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
import unittest
import pytest
import pickle
from sklearn.base import clone
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso
from econml.dml import LinearDML, DML, NonParamDML
from econml.dr import LinearDRLearner, DRLearner
from econml.inference import (BootstrapInference, NormalInferenceResults,
EmpiricalInferenceResults, PopulationSummaryResults)
from econml.sklearn_extensions.linear_model import StatsModelsLinearRegression, DebiasedLasso
from econml.utilities import get_input_columns
class TestInference(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(123)
# DGP constants
cls.n = 1000
cls.d_w = 3
cls.d_x = 3
# Generate data
cls.X = np.random.uniform(0, 1, size=(cls.n, cls.d_x))
cls.W = np.random.normal(0, 1, size=(cls.n, cls.d_w))
cls.T = np.random.binomial(1, .5, size=(cls.n,))
cls.Y = np.random.normal(0, 1, size=(cls.n, ))
def test_summary(self):
"""Tests the inference results summary for continuous treatment estimators."""
        # Test inference results when `cate_feature_names` does not exist
for inference in [BootstrapInference(n_bootstrap_samples=5), 'auto']:
cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(),
featurizer=PolynomialFeatures(degree=2,
include_bias=False)
)
cate_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
summary_results = cate_est.summary()
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
default_names = get_input_columns(TestInference.X)
fnames = PolynomialFeatures(degree=2, include_bias=False).fit(
TestInference.X).get_feature_names(default_names)
np.testing.assert_array_equal(coef_rows, fnames)
intercept_rows = np.asarray(summary_results.tables[1].data)[1:, 0]
np.testing.assert_array_equal(intercept_rows, ['cate_intercept'])
cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(),
featurizer=PolynomialFeatures(degree=2,
include_bias=False)
)
cate_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
fnames = ['Q' + str(i) for i in range(TestInference.d_x)]
summary_results = cate_est.summary(feature_names=fnames)
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
fnames = PolynomialFeatures(degree=2, include_bias=False).fit(
TestInference.X).get_feature_names(input_features=fnames)
np.testing.assert_array_equal(coef_rows, fnames)
cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(), featurizer=None)
cate_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
summary_results = cate_est.summary()
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
np.testing.assert_array_equal(coef_rows, ['X' + str(i) for i in range(TestInference.d_x)])
cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(), featurizer=None)
cate_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
fnames = ['Q' + str(i) for i in range(TestInference.d_x)]
summary_results = cate_est.summary(feature_names=fnames)
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
np.testing.assert_array_equal(coef_rows, fnames)
cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(), featurizer=None)
wrapped_est = self._NoFeatNamesEst(cate_est)
wrapped_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
summary_results = wrapped_est.summary()
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
np.testing.assert_array_equal(coef_rows, ['X' + str(i) for i in range(TestInference.d_x)])
cate_est = LinearDML(model_t=LinearRegression(), model_y=LinearRegression(), featurizer=None)
wrapped_est = self._NoFeatNamesEst(cate_est)
wrapped_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
fnames = ['Q' + str(i) for i in range(TestInference.d_x)]
summary_results = wrapped_est.summary(feature_names=fnames)
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
np.testing.assert_array_equal(coef_rows, fnames)
def test_summary_discrete(self):
"""Tests the inference results summary for discrete treatment estimators."""
        # Test inference results when `cate_feature_names` does not exist
for inference in [BootstrapInference(n_bootstrap_samples=5), 'auto']:
cate_est = LinearDRLearner(model_regression=LinearRegression(), model_propensity=LogisticRegression(),
featurizer=PolynomialFeatures(degree=2,
include_bias=False)
)
cate_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
summary_results = cate_est.summary(T=1)
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
default_names = get_input_columns(TestInference.X)
fnames = PolynomialFeatures(degree=2, include_bias=False).fit(
TestInference.X).get_feature_names(default_names)
np.testing.assert_array_equal(coef_rows, fnames)
intercept_rows = np.asarray(summary_results.tables[1].data)[1:, 0]
np.testing.assert_array_equal(intercept_rows, ['cate_intercept'])
cate_est = LinearDRLearner(model_regression=LinearRegression(),
model_propensity=LogisticRegression(),
featurizer=PolynomialFeatures(degree=2,
include_bias=False)
)
cate_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
fnames = ['Q' + str(i) for i in range(TestInference.d_x)]
summary_results = cate_est.summary(T=1, feature_names=fnames)
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
fnames = PolynomialFeatures(degree=2, include_bias=False).fit(
TestInference.X).get_feature_names(input_features=fnames)
np.testing.assert_array_equal(coef_rows, fnames)
cate_est = LinearDRLearner(model_regression=LinearRegression(),
model_propensity=LogisticRegression(), featurizer=None)
cate_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
summary_results = cate_est.summary(T=1)
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
np.testing.assert_array_equal(coef_rows, ['X' + str(i) for i in range(TestInference.d_x)])
cate_est = LinearDRLearner(model_regression=LinearRegression(),
model_propensity=LogisticRegression(), featurizer=None)
cate_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
fnames = ['Q' + str(i) for i in range(TestInference.d_x)]
summary_results = cate_est.summary(T=1, feature_names=fnames)
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
np.testing.assert_array_equal(coef_rows, fnames)
cate_est = LinearDRLearner(model_regression=LinearRegression(),
model_propensity=LogisticRegression(), featurizer=None)
wrapped_est = self._NoFeatNamesEst(cate_est)
wrapped_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
summary_results = wrapped_est.summary(T=1)
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
np.testing.assert_array_equal(coef_rows, ['X' + str(i) for i in range(TestInference.d_x)])
cate_est = LinearDRLearner(model_regression=LinearRegression(),
model_propensity=LogisticRegression(), featurizer=None)
wrapped_est = self._NoFeatNamesEst(cate_est)
wrapped_est.fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W,
inference=inference
)
fnames = ['Q' + str(i) for i in range(TestInference.d_x)]
summary_results = wrapped_est.summary(T=1, feature_names=fnames)
coef_rows = np.asarray(summary_results.tables[0].data)[1:, 0]
np.testing.assert_array_equal(coef_rows, fnames)
def test_degenerate_cases(self):
"""Test that we return the correct values when our distribution doesn't vary"""
predictions = np.array([[1, 0], [1, 1]]) # first component is always 1
for inf in [EmpiricalInferenceResults(d_t=1, d_y=2,
pred=np.mean(predictions, axis=0), pred_dist=predictions,
inf_type='coefficient'),
NormalInferenceResults(d_t=1, d_y=2,
pred=np.mean(predictions, axis=0), pred_stderr=np.std(predictions, axis=0),
mean_pred_stderr=None,
inf_type='coefficient')]:
zs = inf.zstat()
pv = inf.pvalue()
# test value 0 is less than estimate of 1 and variance is 0, so z score should be inf
assert np.isposinf(zs[0])
# predictions in column 1 have nonzero variance, so the zstat should always be some finite value
assert np.isfinite(zs[1])
assert pv[0] == 0 # pvalue should be zero when test value is greater or less than all samples
test_point = np.array([1, 0.5])
zs = inf.zstat(test_point)
pv = inf.pvalue(test_point)
# test value 1 is equal to the estimate of 1 and variance is 0, so z score should be nan
assert np.isnan(zs[0])
# predictions in column 1 have nonzero variance, so the zstat should always be some finite value
assert np.isfinite(zs[1])
            # pvalue is also nan when variance is 0 and the tested value equals the prediction
assert np.isnan(pv[0])
# pvalue for second column should be greater than zero since some points are on either side
# of the tested value
assert 0 < pv[1] <= 1
test_point = np.array([2, 1])
zs = inf.zstat(test_point)
pv = inf.pvalue(test_point)
# test value 2 is greater than estimate of 1 and variance is 0, so z score should be -inf
assert np.isneginf(zs[0])
# predictions in column 1 have nonzero variance, so the zstat should always be some finite value
assert np.isfinite(zs[1])
            assert pv[0] == 0  # pvalue should be zero when test value is greater or less than all samples
pop = PopulationSummaryResults(np.mean(predictions, axis=0).reshape(1, 2), np.std(
predictions, axis=0).reshape(1, 2), None, d_t=1, d_y=2, alpha=0.05, value=0, decimals=3, tol=0.001)
pop._print() # verify that we can access all attributes even in degenerate case
pop.summary()
def test_can_summarize(self):
LinearDML(model_t=LinearRegression(), model_y=LinearRegression()).fit(
TestInference.Y,
TestInference.T,
TestInference.X,
TestInference.W
).summary()
LinearDRLearner(model_regression=LinearRegression(),
model_propensity=LogisticRegression(), fit_cate_intercept=False).fit(
TestInference.Y,
TestInference.T > 0,
TestInference.X,
TestInference.W,
inference=BootstrapInference(5)
).summary(1)
def test_inference_with_none_stderr(self):
Y, T, X, W = TestInference.Y, TestInference.T, TestInference.X, TestInference.W
est = DML(model_y=LinearRegression(),
model_t=LinearRegression(),
model_final=Lasso(alpha=0.1, fit_intercept=False),
featurizer=PolynomialFeatures(degree=1, include_bias=False),
random_state=123)
est.fit(Y, T, X=X, W=W)
est.summary()
est.coef__inference().summary_frame()
est.intercept__inference().summary_frame()
est.effect_inference(X).summary_frame()
est.effect_inference(X).population_summary()
est.const_marginal_effect_inference(X).summary_frame()
est.marginal_effect_inference(T, X).summary_frame()
est = NonParamDML(model_y=LinearRegression(),
model_t=LinearRegression(),
model_final=LinearRegression(fit_intercept=False),
featurizer=PolynomialFeatures(degree=1, include_bias=False),
random_state=123)
est.fit(Y, T, X=X, W=W)
est.effect_inference(X).summary_frame()
est.effect_inference(X).population_summary()
est.const_marginal_effect_inference(X).summary_frame()
est.marginal_effect_inference(T, X).summary_frame()
est = DRLearner(model_regression=LinearRegression(),
model_propensity=LogisticRegression(),
model_final=LinearRegression())
est.fit(Y, T, X=X, W=W)
est.effect_inference(X).summary_frame()
est.effect_inference(X).population_summary()
est.const_marginal_effect_inference(X).summary_frame()
est.marginal_effect_inference(T, X).summary_frame()
def test_auto_inference(self):
Y, T, X, W = TestInference.Y, TestInference.T, TestInference.X, TestInference.W
est = DRLearner(model_regression=LinearRegression(),
model_propensity=LogisticRegression(),
model_final=StatsModelsLinearRegression())
est.fit(Y, T, X=X, W=W)
est.effect_inference(X).summary_frame()
est.effect_inference(X).population_summary()
est.const_marginal_effect_inference(X).summary_frame()
est.marginal_effect_inference(T, X).summary_frame()
est = DRLearner(model_regression=LinearRegression(),
model_propensity=LogisticRegression(),
model_final=LinearRegression(),
multitask_model_final=True)
est.fit(Y, T, X=X, W=W)
with pytest.raises(AttributeError):
est.effect_inference(X)
est = DML(model_y=LinearRegression(),
model_t=LinearRegression(),
model_final=StatsModelsLinearRegression(fit_intercept=False),
random_state=123)
est.fit(Y, T, X=X, W=W)
est.summary()
est.coef__inference().summary_frame()
assert est.coef__inference().stderr is not None
est.intercept__inference().summary_frame()
assert est.intercept__inference().stderr is not None
est.effect_inference(X).summary_frame()
assert est.effect_inference(X).stderr is not None
est.effect_inference(X).population_summary()
est.const_marginal_effect_inference(X).summary_frame()
assert est.const_marginal_effect_inference(X).stderr is not None
est.marginal_effect_inference(T, X).summary_frame()
assert est.marginal_effect_inference(T, X).stderr is not None
est = NonParamDML(model_y=LinearRegression(),
model_t=LinearRegression(),
model_final=DebiasedLasso(),
random_state=123)
est.fit(Y, T, X=X, W=W)
est.effect_inference(X).summary_frame()
assert est.effect_inference(X).stderr is not None
est.effect_inference(X).population_summary()
est.const_marginal_effect_inference(X).summary_frame()
assert est.const_marginal_effect_inference(X).stderr is not None
est.marginal_effect_inference(T, X).summary_frame()
assert est.marginal_effect_inference(T, X).stderr is not None
def test_pickle_inferenceresult(self):
Y, T, X, W = TestInference.Y, TestInference.T, TestInference.X, TestInference.W
est = DML(model_y=LinearRegression(),
model_t=LinearRegression(),
model_final=Lasso(alpha=0.1, fit_intercept=False),
featurizer=PolynomialFeatures(degree=1, include_bias=False),
random_state=123)
est.fit(Y, T, X=X, W=W)
effect_inf = est.effect_inference(X)
s = pickle.dumps(effect_inf)
def test_mean_pred_stderr(self):
"""Test that mean_pred_stderr is not None when estimator's final stage is linear"""
Y, T, X, W = TestInference.Y, TestInference.T, TestInference.X, TestInference.W
ests = [LinearDML(model_t=LinearRegression(), model_y=LinearRegression(),
featurizer=PolynomialFeatures(degree=2,
include_bias=False)
),
LinearDRLearner(model_regression=LinearRegression(), model_propensity=LogisticRegression(),
featurizer=PolynomialFeatures(degree=2,
include_bias=False)
)]
for est in ests:
est.fit(Y, T, X=X, W=W)
assert est.const_marginal_effect_inference(X).population_summary().mean_pred_stderr is not None
            # mean_pred_stderr is only not None when T1 is a constant or a list of constants
assert est.effect_inference(X).population_summary().mean_pred_stderr is not None
if est.__class__.__name__ == "LinearDRLearner":
assert est.coef__inference(T=1).mean_pred_stderr is None
else:
assert est.coef__inference().mean_pred_stderr is None
def test_isolate_inferenceresult_from_estimator(self):
Y, T, X, W = TestInference.Y, TestInference.T, TestInference.X, TestInference.W
est = LinearDML().fit(Y, T, X=X, W=W)
coef = est.coef_
inf = est.coef__inference()
inf.pred[0] = .5
new_coef = est.coef_
np.testing.assert_array_equal(coef, new_coef)
def test_translate(self):
Y, T, X, W = TestInference.Y, TestInference.T, TestInference.X, TestInference.W
for offset in [10, pd.Series(np.arange(TestInference.X.shape[0]))]:
for inf in ['auto', BootstrapInference(n_bootstrap_samples=5)]:
est = LinearDML().fit(Y, T, X=X, W=W, inference=inf)
inf = est.const_marginal_effect_inference(X)
pred, bounds, summary = inf.point_estimate, inf.conf_int(), inf.summary_frame()
inf.translate(offset)
pred2, bounds2, summary2 = inf.point_estimate, inf.conf_int(), inf.summary_frame()
np.testing.assert_array_equal(pred + offset, pred2)
np.testing.assert_array_almost_equal(bounds[0] + offset, bounds2[0])
np.testing.assert_array_almost_equal(bounds[1] + offset, bounds2[1])
def test_scale(self):
Y, T, X, W = TestInference.Y, TestInference.T, TestInference.X, TestInference.W
for factor in [10, pd.Series(np.arange(TestInference.X.shape[0]))]:
for inf in ['auto', BootstrapInference(n_bootstrap_samples=5)]:
est = LinearDML().fit(Y, T, X=X, W=W, inference=inf)
inf = est.const_marginal_effect_inference(X)
pred, bounds, summary = inf.point_estimate, inf.conf_int(), inf.summary_frame()
inf.scale(factor)
pred2, bounds2, summary2 = inf.point_estimate, inf.conf_int(), inf.summary_frame()
np.testing.assert_array_equal(pred * factor, pred2)
np.testing.assert_array_almost_equal(bounds[0] * factor, bounds2[0])
np.testing.assert_array_almost_equal(bounds[1] * factor, bounds2[1])
class _NoFeatNamesEst:
def __init__(self, cate_est):
self.cate_est = clone(cate_est, safe=False)
def __getattr__(self, name):
if name != 'cate_feature_names':
return getattr(self.cate_est, name)
else:
return self.__getattribute__(name)
| mit |
musically-ut/statsmodels | statsmodels/iolib/tests/test_summary.py | 31 | 1535 | '''examples to check summary, not converted to tests yet
'''
from __future__ import print_function
if __name__ == '__main__':
from statsmodels.regression.tests.test_regression import TestOLS
#def mytest():
aregression = TestOLS()
TestOLS.setupClass()
results = aregression.res1
r_summary = str(results.summary_old())
print(r_summary)
olsres = results
print('\n\n')
r_summary = str(results.summary())
print(r_summary)
print('\n\n')
from statsmodels.discrete.tests.test_discrete import TestProbitNewton
aregression = TestProbitNewton()
TestProbitNewton.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print(r_summary)
print('\n\n')
probres = results
from statsmodels.robust.tests.test_rlm import TestHampel
aregression = TestHampel()
#TestHampel.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print(r_summary)
rlmres = results
print('\n\n')
from statsmodels.genmod.tests.test_glm import TestGlmBinomial
aregression = TestGlmBinomial()
#TestGlmBinomial.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print(r_summary)
#print(results.summary2(return_fmt='latex'))
#print(results.summary2(return_fmt='csv'))
smry = olsres.summary()
print(smry.as_csv())
# import matplotlib.pyplot as plt
# plt.plot(rlmres.model.endog,'o')
# plt.plot(rlmres.fittedvalues,'-')
#
# plt.show() | bsd-3-clause |
bsipocz/statsmodels | examples/python/tsa_dates.py | 29 | 1169 |
## Dates in timeseries models
from __future__ import print_function
import statsmodels.api as sm
import pandas as pd
# ## Getting started
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
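# (With annual data each generated date falls on the last day of its year,
# e.g. datetime(1700, 12, 31).)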
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# ## Using Pandas
#
# Make a pandas Series or DataFrame
endog = pd.Series(data.endog, index=dates)
# Instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# ## Using explicit dates
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# Note: This attribute only exists if predict has been called. It holds the dates associated with the last call to predict.
| bsd-3-clause |