repo_name | path | copies | size | content | license
---|---|---|---|---|---|
sebi06/BioFormatsRead | showZwellplate.py | 1 | 2151 | # -*- coding: utf-8 -*-
"""
@author: Sebi
File: showZwellplate.py
Date: 01.06.2017
Version: 1.4
"""
import bftools as bf
import matplotlib.pyplot as plt
import os
saveformat = '.png'
#filenamecsv = r'testdata/Wellchamber_384_Comb.csv'
#filenamecsv = r'testdata/fixed endpoint 3C 2_5 384well_planetable.csv'
#filenameczi = r'testdata/B4_B5_S=8_4Pos_perWell_T=2_Z=1_CH=1.czi'
#filenameczi = r'c:\Users\m1srh\OneDrive - Carl Zeiss AG\Python_Projects\BioFormatsRead\testdata\B4_B5_S=8_4Pos_perWell_T=2_Z=1_CH=1.czi'
filenameczi = r'c:\Users\m1srh\OneDrive - Carl Zeiss AG\Python_Projects\BioFormatsRead\testdata\testwell96.czi'
#filenameczi = r'c:\Users\M1SRH\Documents\Testdata_Zeiss\RareEvent_Test_Wizard\OverViewScan_Test_raw.czi'
#filenamecsv = r'testdata/testwell96_planetable.csv'
#filenameczi = r'c:\Output\Guided_Acquisition\DTScan_ID4.czi'
#filenameczi= r'c:\Users\m1srh\Documents\Testdata_Zeiss\Castor\fixed endpoint 3C 2_5 384well.czi'
# specify bioformats_package.jar to use if required
# Attention: for larger CZI tile images containing an image pyramid one must still use 5.1.10
# since the latest version is not fully supported by python-bioformats yet
bfpackage = r'bfpackage\5.1.10\bioformats_package.jar'
bf.set_bfpath(bfpackage)
# define separator
separator = '\t'
# create plane info from CZI image file and write CSV file (optional)
planetable, filenamecsv, MetaInfo = bf.get_planetable(filenameczi, writecsv=True, separator=separator, showinfo=True)
# or use the CSV file directly once it was created
#planetable = pd.read_csv(filenamecsv, sep=separator)
# show the dataframe
print(planetable[:10])
print(planetable.shape[0])
# define name for figure to be saved
figuresavename = os.path.splitext(filenamecsv)[0] + '_XYZ-Pos' + saveformat
# display the XYZ positions
fig1, fig2 = bf.scatterplot(planetable,
ImageID=0,
T=0,
CH=0, Z=0, size=250,
savefigure=True,
figsavename=figuresavename,
showsurface=True)
# show the plot
plt.show()
# exit
os._exit(0)
| bsd-2-clause |
tkarna/cofs | test/tracerEq/test_h-diffusion_mes_2d.py | 1 | 6837 | """
Testing 2D horizontal diffusion of tracers against analytical solution.
"""
from thetis import *
import numpy
from scipy import stats
import pytest
def run(refinement, **model_options):
print_output('--- running refinement {:}'.format(refinement))
# domain dimensions - channel in x-direction
lx = 20.0e3
ly = 5.0e3 / refinement
area = lx*ly
depth = 30.0
horizontal_diffusivity = 1.0e3
# mesh
nx = 8 * refinement + 1
ny = 1 # constant -- channel
mesh2d = RectangleMesh(nx, ny, lx, ly)
# simulation run time
t_end = 3000.0
# initial time
t_init = 1000.0 # NOTE start from t > 0 for smoother init cond
t_export = (t_end - t_init)/8.0
# outputs
outputdir = 'outputs'
# bathymetry
p1_2d = get_functionspace(mesh2d, 'CG', 1)
bathymetry_2d = Function(p1_2d, name='Bathymetry')
bathymetry_2d.assign(depth)
solverobj = solver2d.FlowSolver2d(mesh2d, bathymetry_2d)
options = solverobj.options
options.use_nonlinear_equations = False
options.horizontal_velocity_scale = Constant(1.0)
options.no_exports = True
options.output_directory = outputdir
options.simulation_end_time = t_end
options.simulation_export_time = t_export
options.solve_tracer = True
options.use_limiter_for_tracers = True
options.fields_to_export = ['tracer_2d']
options.horizontal_diffusivity = Constant(horizontal_diffusivity)
options.horizontal_viscosity_scale = Constant(horizontal_diffusivity)
options.update(model_options)
solverobj.create_equations()
t = t_init # simulation time
t_const = Constant(t)
u_max = 1.0
u_min = -1.0
x0 = lx/2.0
x, y = SpatialCoordinate(solverobj.mesh2d)
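# Note: this is the classical error-function solution of the 1D diffusion
# equation for a step initial condition centred at x0,
#   c(x, t) = (u_max + u_min)/2 - (u_max - u_min)/2 * erf((x - x0)/sqrt(4*D*t)),
# with D = horizontal_diffusivity, which is what tracer_expr below encodes.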
tracer_expr = 0.5*(u_max + u_min) - 0.5*(u_max - u_min)*erf((x - x0)/sqrt(4*horizontal_diffusivity*t_const))
tracer_ana = Function(solverobj.function_spaces.H_2d, name='tracer analytical')
elev_init = Function(solverobj.function_spaces.H_2d, name='elev init')
solverobj.assign_initial_conditions(elev=elev_init, tracer=tracer_expr)
# export analytical solution
if not options.no_exports:
out_tracer_ana = File(os.path.join(options.output_directory, 'tracer_ana.pvd'))
def export_func():
if not options.no_exports:
solverobj.export()
# update analytical solution to correct time
t_const.assign(t)
out_tracer_ana.write(tracer_ana.project(tracer_expr))
# export initial conditions
export_func()
# custom time loop that solves tracer equation only
ti = solverobj.timestepper.timesteppers.tracer
i = 0
iexport = 1
next_export_t = t + solverobj.options.simulation_export_time
while t < t_end - 1e-8:
ti.advance(t)
t += solverobj.dt
i += 1
if t >= next_export_t - 1e-8:
print_output('{:3d} i={:5d} t={:8.2f} s tracer={:8.2f}'.format(iexport, i, t, norm(solverobj.fields.tracer_2d)))
export_func()
next_export_t += solverobj.options.simulation_export_time
iexport += 1
# project analytical solution on high order mesh
t_const.assign(t)
# compute L2 norm
l2_err = errornorm(tracer_expr, solverobj.fields.tracer_2d)/numpy.sqrt(area)
print_output('L2 error {:.12f}'.format(l2_err))
return l2_err
def run_convergence(ref_list, saveplot=False, **options):
"""Runs test for a list of refinements and computes error convergence rate"""
polynomial_degree = options.get('polynomial_degree', 1)
l2_err = []
for r in ref_list:
l2_err.append(run(r, **options))
x_log = numpy.log10(numpy.array(ref_list, dtype=float)**-1)
y_log = numpy.log10(numpy.array(l2_err))
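# The observed convergence rate is the slope of log10(L2 error) against
# log10(1/refinement) ~ log10(dx): for a scheme of order p the error scales
# like C*dx**p, so the fitted slope should approach p (compared against
# polynomial_degree + 1 below).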
setup_name = 'h-diffusion'
def check_convergence(x_log, y_log, expected_slope, field_str, saveplot):
slope_rtol = 0.20
slope, intercept, r_value, p_value, std_err = stats.linregress(x_log, y_log)
if saveplot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(5, 5))
# plot points
ax.plot(x_log, y_log, 'k.')
x_min = x_log.min()
x_max = x_log.max()
offset = 0.05*(x_max - x_min)
npoints = 50
xx = numpy.linspace(x_min - offset, x_max + offset, npoints)
yy = intercept + slope*xx
# plot line
ax.plot(xx, yy, linestyle='--', linewidth=0.5, color='k')
ax.text(xx[2*int(npoints/3)], yy[2*int(npoints/3)], '{:4.2f}'.format(slope),
verticalalignment='top',
horizontalalignment='left')
ax.set_xlabel('log10(dx)')
ax.set_ylabel('log10(L2 error)')
ax.set_title(' '.join([setup_name, field_str, 'degree={:}'.format(polynomial_degree)]))
ref_str = 'ref-' + '-'.join([str(r) for r in ref_list])
degree_str = 'o{:}'.format(polynomial_degree)
imgfile = '_'.join(['convergence', setup_name, field_str, ref_str, degree_str])
if options.get('use_automatic_sipg_parameter', False):
imgfile = '_'.join([imgfile, 'sipg_auto'])
imgfile += '.png'
imgdir = create_directory('plots')
imgfile = os.path.join(imgdir, imgfile)
print_output('saving figure {:}'.format(imgfile))
plt.savefig(imgfile, dpi=200, bbox_inches='tight')
if expected_slope is not None:
err_msg = '{:}: Wrong convergence rate {:.4f}, expected {:.4f}'.format(setup_name, slope, expected_slope)
assert slope > expected_slope*(1 - slope_rtol), err_msg
print_output('{:}: convergence rate {:.4f} PASSED'.format(setup_name, slope))
else:
print_output('{:}: {:} convergence rate {:.4f}'.format(setup_name, field_str, slope))
return slope
check_convergence(x_log, y_log, polynomial_degree+1, 'tracer', saveplot)
# ---------------------------
# standard tests for pytest
# ---------------------------
@pytest.fixture(params=[True, False])
def auto_sipg(request):
return request.param
@pytest.fixture(params=['CrankNicolson', 'SSPRK33', 'ForwardEuler', 'BackwardEuler', 'DIRK22', 'DIRK33'])
def stepper(request):
return request.param
def test_horizontal_diffusion(auto_sipg, stepper):
run_convergence([1, 2, 3], polynomial_degree=1,
timestepper_type=stepper,
use_automatic_sipg_parameter=auto_sipg,
)
# ---------------------------
# run individual setup for debugging
# ---------------------------
if __name__ == '__main__':
run_convergence([1, 2, 4, 6, 8], polynomial_degree=1,
timestepper_type='CrankNicolson',
no_exports=False, saveplot=True)
| mit |
jkitzes/macroeco | macroeco/models/_distributions_docstrings.py | 1 | 9902 | # From scipy 0.14 - docstring internals keep changing with scipy updates
# So for now, include it here for import into _distributions
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'parameters': """\nParameters\n----------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
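# Note: each template below is rendered by %-substituting the distribution's
# shape-parameter string, e.g. ``_doc_rvs % {'shapes': 'a, b'}`` gives
# "rvs(a, b, loc=0, scale=1, size=1) ..." for a two-shape distribution; the
# rendered pieces are then assembled through the ``docdict`` mappings defined
# further down.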
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(x, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(x, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of sf).
"""
_doc_moment = """\
moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = """\
expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = """
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
"""
_doc_default_longsummary = """\
Continuous random variables are defined from a standard form and may
require some shape parameters to complete their specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, freeze the distribution and display the frozen pdf:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'callparams': _doc_default_callparams,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'Continuous', 'Discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
docdict_discrete['example'] = _doc_default_discrete_example
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['callparams'],
docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
| bsd-2-clause |
cokelaer/msdas | src/msdas/yeast.py | 1 | 46782 | from __future__ import division
import clustering
import numpy as np
import pylab
import pandas as pd
from msdas import replicates
from readers import MassSpecReader
from easydev import get_share_file as gsf
from cno import CNOGraph
__all__ = ["YEAST", "get_yeast_filenames", "YEAST2MIDAS",
"get_yeast_raw_data", "get_yeast_small_data"]
"""
sim = plotLBodeFitness(cnolist, pknmodel,res, plotParams=list(F=.3, cmap_scale=2, cex=0.5))
Ntimes = length(sim)
N = nrow(sim[[1]]) * ncol(sim[[1]]) * Ntimes
signals = cnolist@signal
sim2 = list()
for(i in seq_along(sim)){
sim2[[i]] = matrix(colMeans((signals[[i]]-sim[[i]])^2, na.rm=T), nrow=1)
}
sim2 = matrix(unlist(sim2), nrow=Ntimes, byrow=T)
sim2 = colMeans(sim2, na.rm=T)
d = data.frame(sim2, row.names=colnames(cnolist@signals[[1]]))
write.table(d, col.names=F, quote=F, sep=",", file="test.csv")
"""
class YEAST(object):
times = [0,1,5,10,20,45]
def __init__(self):
pass
def get_yeast_small_data():
"""Return filename of data related to YEAST
This is the CSV file resulting from the alignment of 6 small data files
provided in the package. Here is the procedure followed to create that file.
::
from msdas import *
m = MassSpecAlignmentYeast(get_yeast_small_files())
# data is now aligned, we can save it
# We also wanted the annotations:
a = AnnotationsYeast(m) # creates the identifiers
a.get_uniprot_entries() # Creates the entry
a.set_annotations() # Creates Entry names
a.check_entries_versus_sequence()
# columns are rename with prefix "a"
a.to_csv("YEAST_small_all_test.csv")
"""
return gsf("msdas", "data", "YEAST_small_all.csv")
def get_yeast_raw_data():
"""Return filename to the YEAST raw data
See :func:`get_yeast_small_data` for procedure on how to generate this
data file. The input filenames used are :meth:`get_yeast_filenames`
"""
return gsf("msdas", "data", "Yeast_all_raw.csv")
def get_yeast_filenames(mode="subset"):
"""Returns the list of filenames related to the YEAST data
:param str mode: valid values are *subset* or *all*. If subset,
filenames looked for are files called alphaX.csv. If mode
is set to *all*, the filenames looked for are all files
called MaX_annotated_CLEAN_COMPACT_Normedian_FINAL.csv
:return: list of 6 full path names related to the YEAST data set (6 files)
The corresponding files are sorted by times (ie, 0, 1, 5, 10, 20, 45).
The 6 filenames must be provided as input to the :class:`~msdas.alignment.MassSpecAlignmentYeast`.
.. seealso:: :class:`~msdas.alignment.MassSpecAlignmentYeast`.
"""
times = [0, 1, 5, 10, 20, 45]
if mode == "subset":
filenames = [gsf("msdas", "data","alpha%s.csv" % str(x)) for x in times]
return filenames
elif mode == "all":
prefix = "Ma"
suffix = "s04_annotated_CLEAN_COMPACT_NORMmedian_FINAL.csv"
filenames = [gsf("msdas", "data","{}{}{}".format(prefix,str(x),suffix)) for x in times]
return filenames
else:
raise ValueError("mode must be in [subset, all]. Defaults to *subset*")
class YEAST2MIDAS(MassSpecReader, YEAST):
"""Class pipeline to read/cluster/write yeast data into MIDAS/PKN.
The constructor of this class performs the following tasks:
#. Reads small data set (with the interpolated data)
#. Reads the raw data set (with replicates)
#. Cleanup and merging similar to what was done in the small data set (see below for details)
#. Creates an instance of :class:`msdas.clustering.MSClustering`
Tools are provided to
#. Reads a PKN and build a MIDAS from the data. A new PKN is generated
with new names containing the psites.
#. plot time series
#. perform clustering
YEAST2MIDAS is also a :class:`msdas.readers.MassSpecReader` so you can get
lots of plotting tools in addition to those provided within this class.
Here is an example::
>>> from msdas import yeast
>>> y = yeast.YEAST2MIDAS(get_yeast_small_data(), get_yeast_raw_data())
>>> # get a new data frame containing only exemplars
>>> df = y.get_df_exemplars(preference=-30)
>>> c = y.get_expanded_cnograph("PKN.sif", df)
>>> # create the new MIDAS and a CNOGraph for the new PKN
>>> c, m, e = y.export_pkn_and_midas("../share/data/PKN-yeast.sif")
:Details: The dataframe :attr:`df` contains all measurements and extra
information such as the protein/psites/sequence/uniprot_entry. The
measurements itself is made of a subset of the 36 measurements, which
are combination of 6 times and 6 alpha experiments.
You can imagine the data as a matrix 6 by 6. The measurements that are
kept for MIDAS are made of the first row, first column and main diagonal.
The raw data is preprocessed using the :class:`msdas.replicates.ReplicatesYeast`
and follows those steps:
#. drop rows where peptide contains an oxidation :meth:`msdas.readers.Cleaner.drop_oxidation`
#. Merge peptide with identical peptide sequence (but different psites)
#. clean rows with too much NAs using :meth:`~msdas.replicates.ReplicatesYeast.clean_na_paper`
and :meth:`~msdas.replicates.ReplicatesYeast.clean_na_paper`
#. Drop rows with only NAs
#. normalise the data using the TIC values
#. Call :meth:`~msdas.readers.Cleaner.fix_duplicated_identifier` (rename
duplicated identifiers)
"""
def __init__(self, data=None, rawdata=None, verbose=False) :
""".. rubric:: Constructor
:param data: optional data set (default to :func:`get_yeast_small_data`)
:param rawdata: optional data set (default to :func:`get_yeast_raw_data`)
"""
if data is None:
data = get_yeast_small_data()
if rawdata is None:
rawdata = get_yeast_raw_data()
super(YEAST2MIDAS, self).__init__(data, cleanup=True,
merge_peptides=True, verbose=verbose)
self.logging.info("Reading raw data set")
self.replicates = replicates.ReplicatesYeast(rawdata)
# because of the next drop_na_exceeds function.
# one way to drop NA: 4 values required over the 36 cells
#self.replicates.drop_na_exceeds_minnonzero()
# another way, which looks like what was done in the paper is
# to set to drop rows where one experiment does not have values
self.replicates.drop_oxidation() # to match small data set
self.replicates.merge_identical_peptide_sequence()
#self.replicates._rebuild_identifier()
self.replicates.clean_na_paper() # cross (replaces with NA) alpha experiment
# if not enough values (at least 4 per set of 6 experiment in alpha direction)
self.replicates.clean_na_paper2() # cross (replaces with NA) alpha experiment
# if not enough values (at least 4 per set of 6 experiments in the NaCl direction)
self.replicates.drop_na_count(108) # to get closer to small data set
self.replicates.normalise() # can be before or after
#self.replicates.merge_peptides()
#self.replicates.drop_oxidation() # to match small data set
self.replicates.sort_psites_ors_only() # ???
self.replicates.fix_duplicated_identifier()
# called once for all
self._cv_buf = pd.DataFrame(self.replicates.get_coefficient_variation())
# here, we set na to zero, just for the clustering
self.cluster = clustering.MSClustering(self, fillna=True, verbose=False)
# FIXME: yeast.df is a reference so if changed, it affects the user parameter
# select only data with the 3 conditions we are interested in
self._measures = list(self.measurements.columns)
self._measures_salt = ["a"+str(a)+"_t"+str(salt) for salt in self.times for a in self.times]
# probably not needed since done in the instanciation above but good check
#self._drop_psites_with_bad_quality()
#self._drop_visual_clustering()
#self.run_clustering()
#print("Scaled dataframe as in the clustering is available in self.df_scaled")
#self.df_scaled = self.cluster.scale(self.cluster.get_group()).transpose()
self.logging.info("data is in `df` attribute and full raw data with replicates in `replicates` attribute")
self._cleanup = False
self.mapping = self.mapping_small_to_raw_june()
def _get_groups(self):
return self.cluster.groups
groups = property(_get_groups,
doc="get the dataframe indices grouped by proteins (alias to cluster.groups)")
def cleanup_june(self):
"""Cleanup the raw and small data set.
The small data set contains labels that are incorrect and do not represent
what pre-processing was performed.
Besides, interpolation (and extrapolation) were applied.
In order to replace the interpolated values by NA, we need to know
the correct label. See :meth:`mapping_small_to_raw_june`. This method
replaces the interpolated values by NAs in the :attr:`df` dataframe, relabel
corerctly the identifiers and removes rows in the big dataframe :attr:`replicates`
that are now useless
FUS3_S177^T180+S177^Y182+T180^Y182', 'HOG1_T174^Y176' time zero are missing
for now, we keep the data with interpolated values.
#. FAR1_S114 has no t0 values. Downstream of PKN. Removed for now
#. 3 RCK2 have no t0 values. Downstream of PKN. Removed for now
"""
if self._cleanup == True:
print("already cleanup. nothing to do")
mapping = self.mapping_small_to_raw_june()
values = [x for x in set(mapping.values()) if x]
# should replace HOG1 t0 a 0.0001 because there is 1 missing value, which
# is the only missing value.
#index = self.replicates['HOG1', "T174^Y176"].index[0]
#self.replicates.df.ix[index, 'a0_t0'] = 0.0001
# Selection of the data in the replicates that we want to look at
selection = self.replicates.df.Identifier.apply(lambda x: x in values)
print(sum(selection))
self.replicates.df = self.replicates.df[selection]
# replace the data with the average of the replicates so that now we have the NAs
# first, let us get the average, keeping track of the identifier as the index of the df
mu = self.replicates.get_mu_df()
mu.index = self.replicates.df.Identifier
# now let us drop identifier that are not in the mapping (keys)
keys = [k for k,v in mapping.iteritems() if v!=None]
# the tokeep copy should not be touched:
self.tokeep = self.df.copy()
self.df = self.df[self.df.Identifier.apply(lambda x : x in keys)]
# replace identifiers by the correct ones
self.df.Identifier = [mapping[x] for x in self.df.Identifier]
self.df.Psite = [x.split("_")[1] for x in self.df.Identifier]
# no need to redo the protein column, which should already be correct.
#replace the data
for identifier in mu.index:
index = self.df[self.df.Identifier == identifier].index[0]
self.df.ix[index, mu.columns] = mu.ix[identifier].values
index = self.tokeep.query("Identifier == 'HOG1_T174^Y176'").index[0]
self.df.ix[index] = self.tokeep.ix[index]
index = self.tokeep.query("Identifier == 'FUS3_S177+T180^Y182'").index[0]
self.df.ix[index] = self.tokeep.ix[index]
self.df.ix[index, 'Identifier'] = 'FUS3_S177^T180+S177^Y182+T180^Y182'
self.df.ix[index, 'Psite'] = 'S177^T180+S177^Y182+T180^Y182'
self._cleanup = True
self.cluster = clustering.MSClustering(self, fillna=False, cleanup=False)
# ???? obosloet ????
def __cleanup_june2(self):
# 'GPD1' 'S23+S24+S27', 'S23+S24+S25+S27', 'S24^S27'
# can be clustered. We keep S24^S27
print("WARNING: should be fixed to not use indices but names")
self.df.drop([15, 16], axis=0, inplace=True) # keep GPD1_S24^S27 only (paper)
self.df.drop([8, 9], axis=0, inplace=True) #DIG2 clustered into S225 (paper)
# DIG1 has 4 clusters: 1 we pick up randomly one of them when there is
# more than 1. no NAs exccept 2 in S272^S275+T277^S279
#S126+S127
#S142 and S330 and S395+S397. kee the latest with ;ean CV 13% against 33 and 16%
self.df.drop([5,1], axis=0, inplace=True)
#S272 AND S272^S275. first has CV 10% and 0 NA; second has 15%.later has 1 NA
# drop S272^S275
self.df.drop(3, axis=0, inplace=True)
#S272^S275+T277^S279
# NO a0_t0 point so removed
self.df.drop(self['FAR1_S114'].index[0], inplace=True)
# 4 NAs
self.df.drop(self['SSK1_S110'].index[0], inplace=True)
self.df.drop(self['SSK1_S673'].index[0], inplace=True)
# STE20 clustering is difficult despite 9 peptides
# We remove T413+S418 because it contains 16 NAs out of 36 anyway
#and 6 out of 16 (MIDAS)
index = self.df[self.df.Identifier == "STE20_T413+S418"].index[0]
self.df.drop(index, inplace=True)
# 35% errors
index = self.df[self.df.Identifier == "STE20_T546+S547"].index[0]
self.df.drop(index, inplace=True)
# we can drop RCK2. It is going to be removed in the export of MIDAS
# since not in the PKN, but we can do it here as well
# true for FPS1, SIC, TEC
nodes = ['FPS1', 'RCK2', 'SIC1', 'TEC1']
self.df = self.df[self.df.Protein.apply(lambda x: x not in nodes)]
def corr2d(self, name1, name2, show=True):
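# Cross-correlate the 6x6 measurement matrices of two identifiers: NAs are
# filled with 0, each matrix is normalised by its Frobenius norm, and
# scipy.signal.correlate2d is applied; with show=True the two matrices and
# the resulting correlation map are displayed.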
d1 = self.get_data_matrix(name1).fillna(0).as_matrix()
d2 = self.get_data_matrix(name2).fillna(0).as_matrix()
d1 /= pylab.sqrt(pylab.sum(d1**2))
d2 /= pylab.sqrt(pylab.sum(d2**2))
from scipy.signal import correlate2d
im = correlate2d(d1, d2)
if show:
pylab.clf();
pylab.subplot(2,2,1)
pylab.imshow(d1, interpolation="None")
pylab.subplot(2,2,2)
pylab.imshow(d2, interpolation="None")
pylab.subplot(2,2,3)
pylab.imshow(im, interpolation="None")
pylab.colorbar()
return im
def corr2d_all(self, mode="max"):
ident = self.df.Identifier
N = len(ident)
df = pd.DataFrame(np.zeros((N,N)), columns=ident.values)
df.index = df.columns
for i1 in self.df.Identifier.index:
print(i1)
for i2 in self.df.Identifier.index:
corr = self.corr2d(ident.ix[i1], ident.ix[i2], show=False)
if mode=="max":
M = corr.max()
elif mode=="integral":
M = corr.sum()/36.
elif mode=="center":
M = corr[5,5]
else:
raise ValueError("mode must be max, integral or center")
df.ix[ident.ix[i1], ident.ix[i2]] = M
return df
def plot_group(self, protein):
df = self.measurements.ix[self.groups[protein]].transpose()
df.columns = self.df.Identifier.ix[self.groups[protein]]
df.plot()
def remove_protein(self, identifier):
self.df.drop(self[identifier].index[0], inplace=True)
def remove_species_not_in_pkn(self, filename):
c = CNOGraph(filename)
# let us get rid of the data that will not be available
nodes = [y for y in set(self.df.Protein) if y not in sorted(set([x.split("_")[0] for x in c.nodes()]))]
# ['FPS1', 'RCK2', 'SIC1', 'TEC1']
self.df = self.df[self.df.Protein.apply(lambda x: x not in nodes)]
return nodes
def get_group_psite_transposed(self, name):
"""Get a dataframe containing a group of protein
:param str name: a valid protein name. See :attr:`df.Protein` to get a
list
:return: transposed dataframe (indices are psites, columns are
measurements for each combination of alpha/NaCl)
.. seealso:: :meth:`get_group_psite`
"""
return self.cluster.get_group(name).transpose()
def get_group_psite(self, name):
"""Get a dataframe containing a groupd of psites for one protein
:param str name: a valid protein name. See :attr:`df.Protein` to get a
list
:return: dataframe (indices are psites, columns are
measurements for each combination of alpha/time (see class
documentation)
.. plot::
:include-source:
:width: 80%
>>> from msdas import yeast
>>> y = yeast.YEAST2MIDAS()
>>> y.get_group_psite("DIG1").plot()
.. seealso:: :meth:`get_group_psite_transposed`
"""
return self.cluster.get_group(name)
def groups_cluster(self):
"""Returns dataframe grouped by protein and clustered.
You must run :meth:`run_affinity_propagation_clustering` before hand
to populate the **cluster** column.
::
>>> y.cluster = y['DIG1']
>>> y.cluster.run_affinity_propagation_clustering(preference=-30)
>>> y.groups_cluster()
"""
if "cluster" not in self.cluster.df.columns:
raise ValueError("cluster column not found in the dataframe. See documentation example.")
return self.cluster.df.groupby(["Protein", "cluster"]).groups
def get_psites_exemplars(self, preference=-30):
"""Returns list of psites corresponding to the exemplars found in the clustering
:param float preference: the parameter of the Affinity Propagation
algorithm. See :class:`msdas.cluster.Affinity`
Affinity propagation algorithm is run on each protein. For each of them,
clusters and their exemplars are found. This method returns the list of
exemplars
>>> psites = y.get_psites_exemplars(preference=-30)
.. seealso:: :meth:`get_psites_mapping`.
"""
psites_tokeep = []
proteins = list(set(self.df.Protein))
self.af_results = {}
for protein in proteins:
self.logging.debug(protein),
if len(self.groups[protein]) == 1:
self.logging.debug(" : no clustering required.")
psite = list(self.df.ix[self.groups[protein]]['Identifier'])
psites_tokeep.append(psite[0])
else:
af = clustering.Affinity(self.get_group_psite(protein),
method="euclidean", transpose=True, preference=preference,
verbose=False)
# Get the indices in the entire dataframe of the exemplars:
indices = np.array(self.groups[protein])[af.cluster_centers_indices]
self.logging.debug("Found %s clusters " % len(indices))
psites_tokeep.extend(self.df.Identifier[indices])
self.af_results[protein] = af
return psites_tokeep
def get_psites_mapping(self, preference=-30):
"""Returns exemplars and the list of proteins in the same cluster
Affinity propagation algorithm is run on each protein. For each of them,
clusters and their exemplars are found. This method returns a dictionary
where each key is an exemplar and its value contains the list of
protein/psites that belongs to this cluster.
:param float preference: the parameter of the Affinity Propagation
algorithm. See :class:`msdas.clustering.Affinity`
:return: dictionary as explained in the documentation here above.
.. seealso:: :meth:`get_psites_exemplars`.
"""
# this call populates the af_results that can be used here below
self.logging.debug("Entering get_psites_mapping----------------------------")
psites = self.get_psites_exemplars(preference=preference)
all_proteins = list(set(self.df.Protein))
res = {}
for protein in all_proteins:
if protein in self.af_results.keys():
self.logging.debug("in clustered protein")
af = self.af_results[protein]
names = af.df.columns
d = dict([(names[af.cluster_centers_indices[i]] ,
list(names[af.labels==i])) for i in set(af.labels)])
res.update(d)
else:
name = [x for x in psites if x.split("_")[0] == protein]
res.update({protein:name})
for name in sorted(res.keys()):
self.logging.debug("%20s" % name + "\t" + ", ".join(res[name]))
return res
def get_df_exemplars(self, preference=-30, normalise=None):
"""Returns a normalised dataframe that corresponds to the exemplars
The psites are obtained from :meth:`get_psites_exemplars`.
:param float preference: the parameter of the Affinity Propagation
algorithm. See :class:`msdas.cluster.Affinity`
:param normalise: minmax / maxonly / timezero. Set to None to ignore
normalisation
"""
psites = self.get_psites_exemplars(preference=preference)
newdf = self.cluster.get_group()[psites].transpose()
if normalise=="minmax":
for index in newdf.index:
M = newdf.ix[index].max()
m = newdf.ix[index].min()
newdf.ix[index] = (newdf.ix[index] - m)/(M-m)
elif normalise=="maxonly":
for index in newdf.index:
M = newdf.ix[index].max()
newdf.ix[index] = newdf.ix[index] / M
elif normalise == "timezero":
for index in newdf.index:
m = newdf.ix[index][0]
newdf.ix[index] = newdf.ix[index] - m
M = newdf.ix[index].max()
newdf.ix[index] /= M
else:
pass
return newdf
def to_midas(self, filename, preference=-1):
"""Given a dataframe, export measurements into MIDAS format
:param str filename:
:param int preference: a negative parameter used in the clustering. See
:class:`msdas.clustering.Affinity`
:return: the MIDAS object.
Internally, a new dataframe is created by selecting the
exemplars from the clustering (the default preference of -1 is in principle
equivalent to not having any clustering). Then, the MIDAS file is
created and saved into a file.
::
y = yeast.YEAST2MIDAS(get_yeast_small_data(), get_yeast_raw_data() )
m = y.to_midas(df, "MD-test.csv")
from cno import XMIDAS
m = XMIDAS("MD-test.csv")
"""
# create a midas builder
df = self.get_df_exemplars(preference=preference)
mb = self._get_midas_builder_from_df(df)
# and save to
m = mb.xmidas
m.to_midas(filename)
return m
def _get_midas_builder_from_df(self, df):
# exports alpha=0 as one condition, NaCl=0 as a second condition, and
# alpha and NaCl both on (where t_alpha = t_NaCl) as a third condition
from cno import MIDASBuilder
m = MIDASBuilder()
from cno.io.measurements import Measurement as Experiment
_measures = ['a0_t0', 'a0_t1', 'a0_t5', 'a0_t10', 'a0_t20', 'a0_t45',
'a1_t1', 'a5_t5', 'a10_t10', 'a20_t20', 'a45_t45',
'a1_t0', 'a5_t0', 'a10_t0', 'a20_t0', 'a45_t0']
df = df[_measures]
inhibitors = {}
for psite in df.index:
for col in df.columns:
value = df.ix[psite][col]
alpha, nacl = col.split("_")
if col == "a0_t0":
# special case that need to be added 3 times i.e. a0_t0
time = 0
stimuli = {"a":1, "NaCl":1}
e = Experiment(psite, time, stimuli, inhibitors, value)
m.add_measurements([e])
stimuli = {"a":1, "NaCl":0}
e = Experiment(psite, time, stimuli, inhibitors, value)
m.add_measurements([e])
stimuli = {"a":0, "NaCl":1}
e = Experiment(psite, time, stimuli, inhibitors, value)
m.add_measurements([e])
elif col.startswith("a0"):
# case alpha=0, NaCl=1 e.g., a0_t5
time = int(nacl[1:])
stimuli = {"a":0, "NaCl":1}
e = Experiment(psite, time, stimuli, inhibitors, value)
m.add_measurements([e])
elif col.endswith("t0"):
# case alpha=1, NaCl=0 e.g., a5_t0
time = int(alpha[1:])
stimuli = {"a":1, "NaCl":0}
e = Experiment(psite, time, stimuli, inhibitors, value)
m.add_measurements([e])
else:
# case alpha=1, NaCl=1 e.g. a5_t5
time = int(alpha[1:])
# could be time = int(nacl[1:])
stimuli = {"a":1, "NaCl":1}
e = Experiment(psite, time, stimuli, inhibitors, value)
m.add_measurements([e])
return m
def get_expanded_cnograph(self, filename_pkn_no_sites, df):
"""Expands node in a SIF into their psites
:param str filename_pkn_no_sites: a valid filename to a PKN in SIF
format.
:param dataframe df: a dataframe with protein/psites names as indices
like the output of :meth:`get_df_exemplars`.
:return: a CNOGraph instance
::
df = y.get_df_exemplars(-1)
y.get_expanded_cnograph("../../share/data/PKN-yeastScaffold.sif", df)
"""
from cno import CNOGraph
c = CNOGraph(filename_pkn_no_sites)
proteins = [this.split("_")[0] for this in df.index]
proteins = list(set(proteins))
c._signals = [x.replace("^", "~") for x in proteins[:]]
for protein in proteins:
psites = [this for this in df.index if this.split("_")[0] == protein]
psites = [psite.replace("^", "~") for psite in psites]
psites = [psite.replace("+", "-") for psite in psites]
self.logging.debug("{} {}".format(protein, psites))
if psites is not None and len(psites) == 1:
# just rename the node in the PKN if found
if protein in c.nodes():
#print("Warning. renaming %s into %s" % (protein, psites[0]))
c = c.relabel_nodes({protein: psites[0]})
else:
self.debug("Warning. %s not found in PKN" % protein)
else:
if protein in c.nodes():
self.debug("split {} in {} nodes".format(protein, psites))
c.split_node(protein, psites)
else:
self.debug("Warnig. %s not found in PKN" % protein)
c._stimuli = ["a", "NaCl"]
#c._signals = [df.index
return c
def export_pkn_and_midas_june(self, pkn, tag="undefined"):
df = self.measurements
df.index = self.df.Identifier
c = self.get_expanded_cnograph(pkn, df)
# midas
m = self.get_midas()
m.df.columns = [x.replace("^", "~") for x in m.df.columns]
m.df.columns = [x.replace("+", "-") for x in m.df.columns]
m.sim.columns = m.df.columns[:]
m.errors = m.sim.copy()
#FIXME: bug in cellnopt.core.xmidas: need to set cellLine manually if created from midasbuilder
m._cellLine = "undefined"
# FIXME: make sure the order is correct
#m.experiments.columns = ["NaCl", "a"]
df = self.get_cv()
errors = self._get_midas_builder_from_df(df).xmidas
errors.df.columns = [x.replace("^", "~") for x in errors.df.columns]
errors.df.columns = [x.replace("+", "-") for x in errors.df.columns]
# FIXME: should use the mapping here
errors.df = errors.df[m.df.columns]
errors.create_empty_simulation() # FIXME required to update sim and have corret plotting
# rename
return c,m, errors
def export_pkn_and_midas(self, pkn_filename,
preference=-30, tag="undefined"):
"""Creates the expanded PKN and MIDAS file given PKN and normalise method
Saves new PKN and MIDAS into PKN-Yeast_psites.sif and MD-Yeast_psites.cv
:param str pkn_filename:
:param float preference: the parameter of the Affinity Propagation
algorithm. See :class:`msdas.cluster.Affinity`
:return: a tuple with cnograph and midas instances.
"""
output_pkn_filename = "PKN-Yeast_psites_%s.sif" % tag
output_midas_filename = "MD-Yeast_%s.csv" % tag
normalise = "None"
print("Generating new dataframe given preference of {} for the clustering".format(preference))
newdf = self.get_df_exemplars(normalise=normalise, preference=preference)
#newdf = newdf[self.df.columns]
#print newdf.columns
print("Expanding the original PKN and saving into {}".format(output_pkn_filename))
c = self.get_expanded_cnograph(pkn_filename, newdf)
print("Creating the MIDAS file into {} ".format(output_midas_filename))
# raw data contains all measurements
m = self._get_midas_builder_from_df(newdf)
m = m.xmidas
m.df.columns = [x.replace("^", "~") for x in m.df.columns]
m.df.columns = [x.replace("+", "-") for x in m.df.columns]
m.sim.columns = m.df.columns[:]
m.errors = m.sim.copy()
# need to remove species in MIDAS that are not in the PKN
#pkn = [x for x in c.nodes() if x not in c._find_and_nodes()]
not_found = []
#print m.names_species
for name in m.names_species:
if name not in [x for x in c.nodes() if x not in c._find_and_nodes()]:
print("{} not found in PKN. Removing from MIDAS file".format(name))
# FUS3_S177+T180~Y182 not found in PKN
not_found.append(name)
print("to be removed")
print(not_found)
#print m.df.index
#print m.df.columns
print(m.df.columns)
m.remove_species(not_found)
#FIXME: bug in cellnopt.core.xmidas: need to set cellLine manually if created from midasbuilder
m._cellLine = "undefined"
# FIXME: make sure the order is correct
#m.save(output_midas_filename)
# get the errors, filter with respect to the species in m
e = self.xmidas_errors()
for this in e.df.columns:
if this not in m.df.columns:
e.df.drop(this,axis=1, inplace=True)
# need to reset the simulation. This is useful for the layout
e.create_empty_simulation()
e._cellLine = "undefined"
# FIXME: make sure the order is correct
e.experiments.columns = ["NaCl", "a"]
#m.save(output_midas_filename)
return (c, m, e)
def get_errors(self, default_cv=0.5):
"""Return errors (coefficient variation) for each identifier
To get the actual scaled errors, multiply by the mean, that is,
by the data itself.
.. todo:: could be moved to the replicates module
"""
errors = []
self.logging.info("filling NA with {}".format(default_cv))
for this in self.df.Identifier:
error = self.get_coefficient_variation(this, default_cv=default_cv)
errors.append(error)
df = pd.DataFrame(errors)
df.index = self.df.Identifier # set psites as indices
df = df[self._measures] # rearrange columns
return df
def xmidas_errors(self, default_cv=0.5):
"""Return coefficient of variation into a XMIDAS object"""
df = self.get_errors(default_cv=default_cv)
errors = self._get_midas_builder_from_df(df).xmidas
errors.df.columns = [x.replace("^","~").replace("+", "-") for x in errors.df.columns]
try:
errors.cellLine = "undefined"
except:
pass
return errors
def mapping_small_to_raw_june(self):
mapping = {}
for k in self.df.Identifier:
mapping[k] = k
mapping['DIG1_S272^T277^S279'] = 'DIG1_S272^S275+T277^S279'
mapping['GPD1_S24+S25+S27'] = 'GPD1_S23+S24+S25+S27'
mapping['GPD1_S23+S24'] = 'GPD1_S23+S24+S27'
mapping["RCK2_S33+T35+T44+S46"] = "RCK2_S32+S33+T35+T44+S46"
mapping["RCK2_S45"] = "RCK2_S45+S46"
mapping['RCK2_S32+S33'] = "RCK2_S32+S33+T35"
mapping['DIG1_S395'] = 'DIG1_S395+S397'
mapping['GPA1_S199'] = 'GPA1_T189+S199+S200'
mapping['PTP2_S258'] = 'PTP2_Y257+S258'
mapping['PBS2_S68'] = 'PBS2_S68+S71+S83'
mapping['FUS3_T180'] = 'FUS3_T173+S177+T180+Y182'
mapping["FUS3_S177+T180^Y182"] = 'FUS3_S177^T180+S177^Y182+T180^Y182'
mapping["SKO1_S94^S108^T113"] = "SKO1_S94+S96^S108^T113"
mapping['SSK1_S351'] = "SSK1_S350+S351"
mapping['STE11_S326'] = "STE11_S323+S326+S326" # TODO fix this psite name
mapping['STE20_S192'] = "STE20_S192+S195"
mapping['STE20_S195'] = "STE20_S192+S195+S196"
mapping['STE20_S196^T197'] = "STE20_S192^S195+S196^T197"
mapping['STE20_S418'] = "STE20_T413+S418"
mapping["STE20_T170^T172"] = "STE20_S169+T170^T172"
mapping["STE20_T203^T207"] = "STE20_T203^T207+T217+T218"
mapping["STE20_T573"] = "STE20_T573+T575"
#mapping["STE20_T511"] = "STE20_T511_1"
mapping['DIG2_T83'] = 'DIG2_T83+S84'
mapping['DIG2_S84'] = 'DIG2_S84+T83'
mapping['SIC1_S201'] = 'SIC1_S198+S201'
mapping['SIC1_T173'] = 'SIC1_T173+S175'
mapping['SIC1_S191'] = 'SIC1_S191_1'
mapping['DIG2_S84'] = 'DIG2_T83+S84_2'
mapping['DIG2_T83'] = 'DIG2_T83+S84_1'
"""# for now, let us ignore DIG1, DIG2, and STE12, downstream of FUS3
for k,v in mapping.iteritems():
for this in ['DIG1', 'DIG2', 'STE12']:
if k.startswith(this):
mapping[k] = None
"""
return mapping
def get_midas(self):
"""Return subset of the small data as a MIDAS object"""
data = self.measurements
data.index = self.df.Identifier
m = self._get_midas_builder_from_df(data).xmidas
m.df.columns = [x.replace("^", "~") for x in m.df.columns]
m.df.columns = [x.replace("+", "-") for x in m.df.columns]
return m
def get_cv(self):
"""Return coefficient of variation with proper indices.
.. todo:: could be in replicates
"""
cv = pd.DataFrame(self.replicates.get_coefficient_variation())
cv.index = self.replicates.df.Identifier
return cv
def get_coefficient_variation(self, identifier, default_cv=0.5):
"""Return CV for a particular identifier
.. todo:: could be in replicates
"""
# no more duplicatesd rows to average
#mapping = self.mapping_small_to_raw_june()
cv = self._cv_buf
indices = list(self.replicates.df[self.replicates.df.Identifier == identifier].index)
if len(indices)>1:
self.logging.warning("get_coefficient_variation on several rows...")
errors = cv.ix[indices]
errors.index = self.replicates.metadata.ix[indices]['Identifier']
errors = errors.mean() # takes the mean of the errors.
errors.fillna(default_cv, inplace=True)
return errors
def pcolor_errors(self, vmax=None, cmap="hot_r", default_cv=np.nan, fontsize=8, vmin=0,
salt=False):
"""plot coefficient of variation for the small daa set using replicates
found in the raw data set.
:param salt: if set to True, re-arrange the columns based on salt
rather than alpha
:return: errors in a dataframe
.. plot::
:include-source:
:width: 80%
from msdas import *
y = YEAST2MIDAS(get_yeast_small_data(), get_yeast_raw_data())
y.cleanup_june() # replaces data with regenerated data including NAs
errors = y.pcolor_errors(vmax=1,vmin=.2,fontsize=7)
"""
errors = self.get_errors(default_cv=default_cv)
errors = errors.ix[sorted(errors.index)]
if salt:
errors = errors[self._measures_salt]
mask = np.ma.array(errors.as_matrix(), mask=np.isnan(errors.as_matrix()))
pylab.clf();
if vmax == None:
vmax = errors.max().max()
cmap = pylab.get_cmap(cmap)
cmap.set_bad("grey", 1)
pylab.pcolormesh(mask, vmin=vmin, vmax=vmax, cmap=cmap)
pylab.colorbar()
N, M = errors.shape
pylab.ylim([0, N])
pylab.xlim(0, M)
pylab.xticks([0.5 +x for x in range(0, M)], errors.columns,
rotation=90, fontsize=fontsize)
pylab.yticks([0.5+x for x in range(0, N)], errors.index, fontsize=fontsize)
# adding the errors to be conservative
pylab.tight_layout()
return errors
def pcolor_na(self, raw=False, fontsize=8):
"""Plot number of NA for protein that are in the small data set
Final number of rows is therefore larger thatn in the small data set.
overlap between small and raw data is 32 rows. the remaining 25 are combination from the
raw data set.
.. plot::
:include-source:
:width: 80%
from msdas import *
import pylab
y = YEAST2MIDAS(get_yeast_small_data(), get_yeast_raw_data())
y.cleanup_june()
errors = y.pcolor_na()
"""
proteins = list(set(self.df.Protein))
r = self.replicates
psites = r.metadata.ix[r.metadata.query("Protein in proteins", engine="python").index].Identifier
tags = self._measures
NAs = {}
for tag in tags:
df = r.get_replicates_from_one_unique_measurement(tag)
nas = 3 - pd.notnull(df.ix[psites.index]).sum(axis=1)
NAs[tag] = nas.values
NAs = pd.DataFrame(NAs, index=psites)
NAs = NAs[self._measures]
NAs = NAs.ix[sorted(NAs.index)]
pylab.clf()
pylab.pcolor(NAs)
N, M = NAs.shape
pylab.ylim([0,N])
pylab.xlim(0, M)
pylab.xticks([0.5 +x for x in range(0,M)], NAs.columns, rotation=90, fontsize=fontsize)
pylab.yticks([0.5+x for x in range(0,N)], NAs.index, fontsize=fontsize)
pylab.colorbar()
pylab.tight_layout()
return NAs
def plot_psites_mapping(self, preference=-30):
"""For each protein, plot all data with the exemplar highlighted.
The clustering is made with :mod:`msdas.clustering` and its affinity
propagation algorithm. In the yeast case, clustering is performed on
euclidean distance.
::
from msdas import *
y = yeast.YEAST2MIDAS(get_yeast_small_data(), get_yeast_raw_data(), drop_non_midas=False)
y.plot_psites_mapping()
"""
mapping = self.get_psites_mapping(preference=preference)
for k,v in mapping.iteritems():
if len(v)>1:
protein = k.split("_")[0]
df = self.df.query("Protein==protein", engine="python")
df = df.set_index("Identifier").drop(self.metadata.drop("Identifier", axis=1), axis=1)
df = df.transpose()
v.remove(k)
ax = df.apply(lambda x:x/np.sqrt((x.abs()**2).sum()))[v].plot(marker='o', lw=1)
df.apply(lambda x:x/np.sqrt((x.abs()**2).sum()))[k].plot(lw=2, ax=ax, marker='o')
def plot_timeseries_midas(self, psite="DIG1_S126+S127", sigma=2, hold=False):
"""Plot data related to a given identifier restricted to the MIDAS-compatible set
Data that can be used in the ODE package must be time series. This
corresponds to salt=0, or NaCl=0, or salt==NaCl; in the 6x6 matrix,
this corresponds to the first row, first column and diagonal.
.. plot::
:include-source:
:width: 80%
from msdas import *
y = YEAST2MIDAS(get_yeast_small_data(), get_yeast_raw_data())
y.plot_timeseries("DIG1_S126+S127")
Errors (2 sigmas) are also shown
"""
l = [0,1,5,10,20,45,46,50,55,65,90,91,95,100,120,145]
_measures = ['a0_t0', 'a0_t1', 'a0_t5', 'a0_t10', 'a0_t20', 'a0_t45',
'a1_t1', 'a5_t5', 'a10_t10', 'a20_t20', 'a45_t45',
'a1_t0', 'a5_t0', 'a10_t0', 'a20_t0', 'a45_t0']
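# The three MIDAS-compatible regimes (a0_tX, aX_tX, aX_t0) are laid out
# consecutively on the single artificial time axis `l` defined above, which
# is why vertical separators are drawn at 45.5 and 90.5 further down.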
df = self.df[_measures].transpose()
df.columns = self.df.Identifier
df['time'] = l
if hold==False:
pylab.clf()
df.plot(x="time", y=psite, marker="o")
pylab.legend([psite])
errors = self.get_errors()
normerr = df[psite]
pylab.errorbar(df['time'],df[psite], yerr=errors.ix[psite].ix[_measures]*normerr*sigma)
pylab.axvline(45.5, alpha=0.5)
pylab.axvline(90.5, alpha=0.5)
Y0 = df[psite]['a0_t0']
pylab.axhline(Y0, alpha=0.5)
pylab.text(20,Y0, "a0_tX")
pylab.text(70,Y0, "aX_tX")
pylab.text(120,Y0, "aX_t0")
pylab.xlim([-.5,146])
def boxplot_errors(self):
""" Show errors (coefficient variation) as a boxplot for each identifier"""
pylab.clf()
self.get_errors(default_cv=np.nan).transpose().boxplot(vert=False)
pylab.tight_layout()
def _find_identifiers_with_missing_time0(self):
"""Return identifiers for which a0_t0 is missing"""
identifiers = self.df[pd.isnull(self.df['a0_t0'])].Identifier
return identifiers.values
def interpolate_time0(self, identifier, cmap="hot_r", method="cubic"):
"""
:param method: 'linear', 'nearest', 'cubic'
"""
# the data may contain NAs so interp2d does not work; we use griddata
from scipy.interpolate import griddata
grid_x = np.array([[0]*6,[1]*6,[5]*6,[10]*6,[20]*6,[45]*6]);
grid_y = grid_x.T
data = self.get_data_matrix(identifier).as_matrix()
points = []
values = []
for i in range(0,6):
for j in range(0,6):
if np.isnan(data[i,j]) == False:
points.append([self.times[i], self.times[j]])
values.append(data[i,j])
grid_z0 = griddata(np.array(points), np.array(values), (grid_x, grid_y),
method=method)
pylab.figure(1)
pylab.clf()
pylab.subplot(1,2,1)
mask = np.ma.array(data, mask=np.isnan(data))
cmap = pylab.get_cmap(cmap)
cmap.set_bad("grey", 1)
#pylab.pcolormesh(pylab.flipud(mask), vmin=vmin, vmax=vmax, cmap=cmap)
pylab.imshow(data, interpolation="None", cmap=cmap)
pylab.subplot(1,2,2)
pylab.imshow(pylab.flipud(grid_z0), origin="lower", cmap=cmap, interpolation='None')
pylab.suptitle(identifier)
return data, grid_z0
def plot_figure6(self, identifier,vmin=0,vmax=2):
from easydev import colors
c = colors.ColorMapTools()
d = {'blue': [0,1,1],
'green':[0,1,0],
'red': [1,1,0]}
cmap = c.get_cmap(d, reverse=False)
pylab.clf();
m = self.get_data_matrix(identifier)
im1 = (1./(m.transpose().divide(m[0], axis=1))).transpose()
pylab.subplot(1,2,1)
pylab.imshow(pylab.flipud(im1), origin="lower", interpolation="None",
cmap=cmap,vmin=vmin,vmax=vmax)
pylab.colorbar();
pylab.xlim([.5,5.5]);
pylab.yticks(range(0,6), ['45','20','10', '5','1', 'Phe0'])
for i,label in enumerate([1,2,3,4,5]):
for j,x in enumerate(im1[label].values):
pylab.text(1+i, 6-j-1, int(100*x)/100.)
pylab.subplot(1,2,2)
im2 = (m.ix[0]/m).transpose()
pylab.imshow(im2, origin="lower", interpolation="None", cmap=cmap,
vmin=vmin,vmax=vmax);
pylab.colorbar();
pylab.xlim([.5,5.5]);
pylab.yticks(range(0,6), ['45','20','10', '5','1', 'NaCl0'])
for i,label in enumerate(im2.columns[1:]):
for j,x in im2[label].iterkv():
pylab.text(1+i, 6-j-1, int(100*x)/100.)
pylab.suptitle("%s" % identifier)
return im1, im2
def load_sim_data(filename, times=[0,1,5,10,20,45]):
"""
signals = colnames(cnolist@signals$`0`)
sim = t(sapply(sim, unlist))
colnames(sim) = signals
write.csv(sim, "sim.csv")
sim = yeast.load_sim_data("sim.csv")
m = midas.XMIDAS("MD-Yeast_test_alpha0oneexp_maxonly.csv")
m.sim = sim.copy()
m.plot(mode="trend")
m.plot(mode="mse")
c = CNOGraph("PKN-", "MD-")
for name in c.nodes():
if name in diffs.columns:
c.node[name]['mse'] = diffs[name][0]
else:
c.node[name]['mse'] = None
"""
sim = pd.read_csv(filename, index_col=0)
N = len(sim.columns)
sim['time'] = times
sim['experiment'] = ['experiment_0'] * 6
sim['cellLine'] = ['undefined'] * 6
sim = sim.set_index(['cellLine', 'experiment', 'time'])
return sim
# function for the linear fit to automate the process
def plotfit(x, y, yerr=None, order=1):
w = None if (yerr is None or np.sum(yerr)==0) else 1/yerr
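# np.polyfit applies the weights to the (unsquared) residuals, so for Gaussian
# uncertainties the appropriate weight is 1/sigma rather than 1/sigma**2.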
p, cov = np.polyfit(x, y, order, w=w, cov=True) # coefficients and covariance matrix
yfit = np.polyval(p, x) # evaluate the polynomial at x
perr = np.sqrt(np.diag(cov)) # standard-deviation estimates for each coefficient
R2 = np.corrcoef(x, y)[0, 1]**2 # coefficient of determination between x and y
resid = y - yfit
chi2red = np.sum((resid/yerr)**2)/(y.size - 2) if w is not None else np.nan
#return yfit, p, R2, chi2red, perr, resid
pylab.errorbar(x, y, yerr=yerr, fmt = 'bo', ecolor='b', capsize=0, elinewidth=2)
pylab.plot(x,y)
pylab.xlim([-.5,46])
pylab.plot(x, yfit, 'r', linewidth=3, color=[1, 0, 0, .5])
| gpl-3.0 |
MartinSavc/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too strongly correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model,
is chosen by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
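# Editorial aside (not in the original example): quantify the claim above that
# the l1 estimate detects too many non-zero coefficients by comparing
# off-diagonal sparsity patterns (1e-8 is an arbitrary zero threshold).
n_nonzero_true = np.sum(np.abs(prec) > 1e-8) - n_features
n_nonzero_est = np.sum(np.abs(prec_) > 1e-8) - n_features
print("off-diagonal non-zeros: ground truth=%d, GraphLassoCV estimate=%d"
      % (n_nonzero_true, n_nonzero_est))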
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
danielballan/mpld3 | mpld3/_display.py | 15 | 16996 | import warnings
import random
import json
import jinja2
import numpy
import re
import os
from ._server import serve
from .utils import deprecated, get_id, write_ipynb_local_js
from .mplexporter import Exporter
from .mpld3renderer import MPLD3Renderer
from . import urls
__all__ = ["fig_to_html", "fig_to_dict", "fig_to_d3",
"display_d3", "display",
"show_d3", "show",
"enable_notebook", "disable_notebook",
"save_html", "save_json"]
# Simple HTML template. This works in standalone web pages for single figures,
# but will not work within the IPython notebook due to the presence of
# requirejs
SIMPLE_HTML = jinja2.Template("""
<script type="text/javascript" src="{{ d3_url }}"></script>
<script type="text/javascript" src="{{ mpld3_url }}"></script>
<style>
{{ extra_css }}
</style>
<div id={{ figid }}></div>
<script type="text/javascript">
!function(mpld3){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
}(mpld3);
</script>
""")
# RequireJS template. If requirejs and jquery are not defined, this will
# result in an error. This is suitable for use within the IPython notebook.
REQUIREJS_HTML = jinja2.Template("""
<style>
{{ extra_css }}
</style>
<div id={{ figid }}></div>
<script type="text/javascript">
if(typeof(window.mpld3) !== "undefined" && window.mpld3._mpld3IsLoaded){
!function (mpld3){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
}(mpld3);
}else{
require.config({paths: {d3: "{{ d3_url[:-3] }}"}});
require(["d3"], function(d3){
window.d3 = d3;
$.getScript("{{ mpld3_url }}", function(){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
});
});
}
</script>
""")
# General HTML template. This should work correctly whether or not requirejs
# is defined, and whether it's embedded in a notebook or in a standalone
# HTML page.
GENERAL_HTML = jinja2.Template("""
<style>
{{ extra_css }}
</style>
<div id={{ figid }}></div>
<script>
function mpld3_load_lib(url, callback){
var s = document.createElement('script');
s.src = url;
s.async = true;
s.onreadystatechange = s.onload = callback;
s.onerror = function(){console.warn("failed to load library " + url);};
document.getElementsByTagName("head")[0].appendChild(s);
}
if(typeof(mpld3) !== "undefined" && mpld3._mpld3IsLoaded){
// already loaded: just create the figure
!function(mpld3){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
}(mpld3);
}else if(typeof define === "function" && define.amd){
// require.js is available: use it to load d3/mpld3
require.config({paths: {d3: "{{ d3_url[:-3] }}"}});
require(["d3"], function(d3){
window.d3 = d3;
mpld3_load_lib("{{ mpld3_url }}", function(){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
});
});
}else{
// require.js not available: dynamically load d3 & mpld3
mpld3_load_lib("{{ d3_url }}", function(){
mpld3_load_lib("{{ mpld3_url }}", function(){
{{ extra_js }}
mpld3.draw_figure({{ figid }}, {{ figure_json }});
})
});
}
</script>
""")
TEMPLATE_DICT = {"simple": SIMPLE_HTML,
"notebook": REQUIREJS_HTML,
"general": GENERAL_HTML}
class NumpyEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, obj):
if isinstance(obj, (numpy.int_, numpy.intc, numpy.intp, numpy.int8,
numpy.int16, numpy.int32, numpy.int64, numpy.uint8,
numpy.uint16,numpy.uint32, numpy.uint64)):
return int(obj)
elif isinstance(obj, (numpy.float_, numpy.float16, numpy.float32,
numpy.float64)):
return float(obj)
return json.JSONEncoder.default(self, obj)
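# Usage sketch (illustrative): NumpyEncoder lets json.dumps handle numpy scalars.
#     json.dumps({"n": numpy.int64(3), "x": numpy.float32(0.5)}, cls=NumpyEncoder)
#     # -> '{"n": 3, "x": 0.5}'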
def fig_to_dict(fig, **kwargs):
"""Output json-serializable dictionary representation of the figure
Parameters
----------
fig : matplotlib figure
The figure to display
**kwargs :
Additional keyword arguments passed to mplexporter.Exporter
Returns
-------
fig_dict : dict
the Python dictionary representation of the figure, which is
directly convertible to json using the standard json package.
See Also
--------
:func:`save_json`: save json representation of a figure to file
:func:`save_html` : save html representation of a figure to file
:func:`fig_to_html` : output html representation of the figure
:func:`show` : launch a local server and show a figure in a browser
:func:`display` : embed figure within the IPython notebook
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
renderer = MPLD3Renderer()
Exporter(renderer, close_mpl=False, **kwargs).run(fig)
fig, figure_dict, extra_css, extra_js = renderer.finished_figures[0]
return figure_dict
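# Usage sketch (illustrative, not part of the library's API docs):
#     import matplotlib.pyplot as plt, mpld3
#     fig, ax = plt.subplots()
#     ax.plot([1, 2, 3], [4, 5, 6])
#     d = mpld3.fig_to_dict(fig)     # json-serializable dict
#     html = mpld3.fig_to_html(fig)  # standalone HTML snippet (see below)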
def fig_to_html(fig, d3_url=None, mpld3_url=None, no_extras=False,
template_type="general", figid=None, use_http=False, **kwargs):
"""Output html representation of the figure
Parameters
----------
fig : matplotlib figure
The figure to display
d3_url : string (optional)
The URL of the d3 library. If not specified, a standard web path
will be used.
mpld3_url : string (optional)
The URL of the mpld3 library. If not specified, a standard web path
will be used.
no_extras : boolean
If true, remove any extra javascript or CSS. The output will be similar
to that if the representation output by fig_to_json is embedded in
a web page.
template_type : string
string specifying the type of HTML template to use. Options are:
``"simple"``
suitable for a simple html page with one figure. Will
fail if require.js is available on the page.
``"notebook"``
assumes require.js and jquery are available.
``"general"``
more complicated, but works both in and out of the
notebook, whether or not require.js and jquery are available
figid : string (optional)
The html/css id of the figure div, which must not contain spaces.
If not specified, a random id will be generated.
use_http : boolean (optional)
If true, use http:// instead of https:// for d3_url and mpld3_url.
**kwargs :
Additional keyword arguments passed to mplexporter.Exporter
Returns
-------
fig_html : string
the HTML representation of the figure
See Also
--------
:func:`save_json`: save json representation of a figure to file
:func:`save_html` : save html representation of a figure to file
:func:`fig_to_dict` : output dictionary representation of the figure
:func:`show` : launch a local server and show a figure in a browser
:func:`display` : embed figure within the IPython notebook
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
template = TEMPLATE_DICT[template_type]
# TODO: allow fig to be a list of figures?
d3_url = d3_url or urls.D3_URL
mpld3_url = mpld3_url or urls.MPLD3_URL
if use_http:
d3_url = d3_url.replace('https://', 'http://')
mpld3_url = mpld3_url.replace('https://', 'http://')
if figid is None:
figid = 'fig_' + get_id(fig) + str(int(random.random() * 1E10))
    elif re.search(r'\s', figid):
raise ValueError("figid must not contain spaces")
renderer = MPLD3Renderer()
Exporter(renderer, close_mpl=False, **kwargs).run(fig)
fig, figure_json, extra_css, extra_js = renderer.finished_figures[0]
if no_extras:
extra_css = ""
extra_js = ""
return template.render(figid=json.dumps(figid),
d3_url=d3_url,
mpld3_url=mpld3_url,
figure_json=json.dumps(figure_json, cls=NumpyEncoder),
extra_css=extra_css,
extra_js=extra_js)
def display(fig=None, closefig=True, local=False, **kwargs):
"""Display figure in IPython notebook via the HTML display hook
Parameters
----------
fig : matplotlib figure
The figure to display (grabs current figure if missing)
closefig : boolean (default: True)
If true, close the figure so that the IPython matplotlib mode will not
display the png version of the figure.
local : boolean (optional, default=False)
if True, then copy the d3 & mpld3 libraries to a location visible to
the notebook server, and source them from there. See Notes below.
**kwargs :
additional keyword arguments are passed through to :func:`fig_to_html`.
Returns
-------
fig_d3 : IPython.display.HTML object
the IPython HTML rich display of the figure.
Notes
-----
Known issues: using ``local=True`` may not work correctly in certain cases:
- In IPython < 2.0, ``local=True`` may fail if the current working
directory is changed within the notebook (e.g. with the %cd command).
- In IPython 2.0+, ``local=True`` may fail if a url prefix is added
(e.g. by setting NotebookApp.base_url).
See Also
--------
:func:`show` : launch a local server and show a figure in a browser
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
# import here, in case users don't have requirements installed
from IPython.display import HTML
import matplotlib.pyplot as plt
if local:
if 'mpld3_url' in kwargs or 'd3_url' in kwargs:
warnings.warn(
"display: specified urls are ignored when local=True")
kwargs['d3_url'], kwargs['mpld3_url'] = write_ipynb_local_js()
if fig is None:
fig = plt.gcf()
if closefig:
plt.close(fig)
return HTML(fig_to_html(fig, **kwargs))
def show(fig=None, ip='127.0.0.1', port=8888, n_retries=50,
local=True, open_browser=True, http_server=None, **kwargs):
"""Open figure in a web browser
Similar behavior to plt.show(). This opens the D3 visualization of the
specified figure in the web browser. On most platforms, the browser
will open automatically.
Parameters
----------
fig : matplotlib figure
The figure to display. If not specified, the current active figure
will be used.
ip : string, default = '127.0.0.1'
the ip address used for the local server
port : int, default = 8888
the port number to use for the local server. If already in use,
a nearby open port will be found (see n_retries)
n_retries : int, default = 50
the maximum number of ports to try when locating an empty port.
local : bool, default = True
if True, use the local d3 & mpld3 javascript versions, within the
js/ folder. If False, use the standard urls.
open_browser : bool (optional)
if True (default), then open a web browser to the given HTML
http_server : class (optional)
optionally specify an HTTPServer class to use for showing the
figure. The default is Python's basic HTTPServer.
**kwargs :
additional keyword arguments are passed through to :func:`fig_to_html`
See Also
--------
:func:`display` : embed figure within the IPython notebook
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
if local:
kwargs['mpld3_url'] = '/mpld3.js'
kwargs['d3_url'] = '/d3.js'
files = {'/mpld3.js': ["text/javascript",
open(urls.MPLD3_LOCAL, 'r').read()],
'/d3.js': ["text/javascript",
open(urls.D3_LOCAL, 'r').read()]}
else:
files = None
if fig is None:
# import here, in case matplotlib.use(...) is called by user
import matplotlib.pyplot as plt
fig = plt.gcf()
html = fig_to_html(fig, **kwargs)
serve(html, ip=ip, port=port, n_retries=n_retries, files=files,
open_browser=open_browser, http_server=http_server)
def enable_notebook(local=False, **kwargs):
"""Enable the automatic display of figures in the IPython Notebook.
This function should be used with the inline Matplotlib backend
that ships with IPython that can be enabled with `%pylab inline`
or `%matplotlib inline`. This works by adding an HTML formatter
for Figure objects; the existing SVG/PNG formatters will remain
enabled.
Parameters
----------
local : boolean (optional, default=False)
if True, then copy the d3 & mpld3 libraries to a location visible to
the notebook server, and source them from there. See Notes below.
**kwargs :
all keyword parameters are passed through to :func:`fig_to_html`
Notes
-----
Known issues: using ``local=True`` may not work correctly in certain cases:
- In IPython < 2.0, ``local=True`` may fail if the current working
directory is changed within the notebook (e.g. with the %cd command).
- In IPython 2.0+, ``local=True`` may fail if a url prefix is added
(e.g. by setting NotebookApp.base_url).
See Also
--------
:func:`disable_notebook` : undo the action of enable_notebook
:func:`display` : embed figure within the IPython notebook
:func:`show` : launch a local server and show a figure in a browser
"""
try:
from IPython.core.getipython import get_ipython
from matplotlib.figure import Figure
except ImportError:
raise ImportError('This feature requires IPython 1.0+ and Matplotlib')
if local:
if 'mpld3_url' in kwargs or 'd3_url' in kwargs:
warnings.warn(
"enable_notebook: specified urls are ignored when local=True")
kwargs['d3_url'], kwargs['mpld3_url'] = write_ipynb_local_js()
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.for_type(Figure,
lambda fig, kwds=kwargs: fig_to_html(fig, **kwds))
def disable_notebook():
"""Disable the automatic display of figures in the IPython Notebook.
See Also
--------
:func:`enable_notebook` : automatically embed figures in IPython notebook
"""
try:
from IPython.core.getipython import get_ipython
from matplotlib.figure import Figure
except ImportError:
raise ImportError('This feature requires IPython 1.0+ and Matplotlib')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Figure, None)
def save_html(fig, fileobj, **kwargs):
"""Save a matplotlib figure to an html file
Parameters
----------
fig : matplotlib Figure instance
The figure to write to file.
fileobj : filename or file object
The filename or file-like object in which to write the HTML
representation of the figure.
**kwargs :
additional keyword arguments will be passed to :func:`fig_to_html`
See Also
--------
:func:`save_json`: save json representation of a figure to file
:func:`fig_to_html` : output html representation of the figure
:func:`fig_to_dict` : output dictionary representation of the figure
"""
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
if not hasattr(fileobj, 'write'):
raise ValueError("fileobj should be a filename or a writable file")
fileobj.write(fig_to_html(fig, **kwargs))
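# Usage sketch (illustrative): write a figure to a standalone HTML file.
#     fig, ax = plt.subplots()
#     ax.scatter(range(10), range(10))
#     save_html(fig, "figure.html")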
def save_json(fig, fileobj, **kwargs):
"""Save a matplotlib figure to a json file.
Note that any plugins which depend on generated HTML will not be included
in the JSON encoding.
Parameters
----------
fig : matplotlib Figure instance
The figure to write to file.
fileobj : filename or file object
The filename or file-like object in which to write the HTML
representation of the figure.
**kwargs :
additional keyword arguments will be passed to :func:`fig_to_dict`
See Also
--------
:func:`save_html` : save html representation of a figure to file
:func:`fig_to_html` : output html representation of the figure
:func:`fig_to_dict` : output dictionary representation of the figure
"""
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
if not hasattr(fileobj, 'write'):
raise ValueError("fileobj should be a filename or a writable file")
json.dump(fig_to_dict(fig, **kwargs), fileobj)
# Deprecated versions of these functions
show_d3 = deprecated(show, "mpld3.show_d3", "mpld3.show")
fig_to_d3 = deprecated(fig_to_html, "mpld3.fig_to_d3", "mpld3.fig_to_html")
display_d3 = deprecated(display, "mpld3.display_d3", "mpld3.display")
| bsd-3-clause |
mtagle/airflow | tests/providers/sqlite/hooks/test_sqlite.py | 5 | 3266 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from unittest.mock import patch
from airflow.models import Connection
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
class TestSqliteHookConn(unittest.TestCase):
def setUp(self):
self.connection = Connection(host='host')
class UnitTestSqliteHook(SqliteHook):
conn_name_attr = 'sqlite_conn_id'
self.db_hook = UnitTestSqliteHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@patch('airflow.providers.sqlite.hooks.sqlite.sqlite3.connect')
def test_get_conn(self, mock_connect):
self.db_hook.get_conn()
mock_connect.assert_called_once_with('host')
class TestSqliteHook(unittest.TestCase):
def setUp(self):
self.cur = mock.MagicMock()
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestSqliteHook(SqliteHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
self.db_hook = UnitTestSqliteHook()
def test_get_first_record(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
self.assertEqual(result_sets[0], self.db_hook.get_first(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
self.assertEqual(result_sets, self.db_hook.get_records(statement))
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook.get_pandas_df(statement)
self.assertEqual(column, df.columns[0])
self.assertEqual(result_sets[0][0], df.values.tolist()[0][0])
self.assertEqual(result_sets[1][0], df.values.tolist()[1][0])
self.cur.execute.assert_called_once_with(statement)
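# Run sketch (editorial note): these unittest-style tests can be run directly,
# e.g. with
#     python -m pytest tests/providers/sqlite/hooks/test_sqlite.py
# (the exact invocation depends on the airflow checkout's test setup).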
| apache-2.0 |
google/brain-tokyo-workshop | WANNRelease/prettyNEAT/domain/classify_gym.py | 2 | 4681 | import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import sys
import cv2
import math
class ClassifyEnv(gym.Env):
"""Classification as an unsupervised OpenAI Gym RL problem.
Includes scikit-learn digits dataset, MNIST dataset
"""
def __init__(self, trainSet, target):
"""
Data set is a tuple of
[0] input data: [nSamples x nInputs]
[1] labels: [nSamples x 1]
Example data sets are given at the end of this file
"""
self.t = 0 # Current batch number
self.t_limit = 0 # Number of batches if you need them
self.batch = 1000 # Number of images per batch
self.seed()
self.viewer = None
self.trainSet = trainSet
self.target = target
nInputs = np.shape(trainSet)[1]
high = np.array([1.0]*nInputs)
self.action_space = spaces.Box(np.array(0,dtype=np.float32), \
np.array(1,dtype=np.float32))
self.observation_space = spaces.Box(np.array(0,dtype=np.float32), \
np.array(1,dtype=np.float32))
self.state = None
self.trainOrder = None
self.currIndx = None
def seed(self, seed=None):
        ''' Seed the RNG used to shuffle the training set '''
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
''' Initialize State'''
#print('Lucky number', np.random.randint(10)) # same randomness?
self.trainOrder = np.random.permutation(len(self.target))
self.t = 0 # timestep
self.currIndx = self.trainOrder[self.t:self.t+self.batch]
self.state = self.trainSet[self.currIndx,:]
return self.state
def step(self, action):
'''
Judge Classification, increment to next batch
action - [batch x output] - softmax output
'''
y = self.target[self.currIndx]
m = y.shape[0]
log_likelihood = -np.log(action[range(m),y])
loss = np.sum(log_likelihood) / m
reward = -loss
if self.t_limit > 0: # We are doing batches
reward *= (1/self.t_limit) # average
self.t += 1
done = False
if self.t >= self.t_limit:
done = True
self.currIndx = self.trainOrder[(self.t*self.batch):\
(self.t*self.batch + self.batch)]
self.state = self.trainSet[self.currIndx,:]
else:
done = True
obs = self.state
return obs, reward, done, {}
# -- Data Sets ----------------------------------------------------------- -- #
def digit_raw():
'''
Converts 8x8 scikit digits to
[samples x pixels] ([N X 64])
'''
from sklearn import datasets
digits = datasets.load_digits()
z = (digits.images/16)
z = z.reshape(-1, (64))
return z, digits.target
def mnist_784():
'''
Converts 28x28 mnist digits to
[samples x pixels] ([N X 784])
'''
import mnist
z = (mnist.train_images()/255)
z = preprocess(z,(28,28))
z = z.reshape(-1, (784))
return z, mnist.train_labels()
def mnist_256():
'''
Converts 28x28 mnist digits to [16x16]
[samples x pixels] ([N X 256])
'''
import mnist
z = (mnist.train_images()/255)
z = preprocess(z,(16,16))
z = z.reshape(-1, (256))
return z, mnist.train_labels()
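# Wiring sketch (editorial illustration, not part of the original module):
# score a uniform "classifier" on the scikit-learn digits set.
#     z, target = digit_raw()
#     env = ClassifyEnv(z, target)
#     obs = env.reset()
#     action = np.ones((env.batch, 10)) / 10.   # fake softmax output
#     obs, reward, done, _ = env.step(action)   # reward = -cross-entropy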
def preprocess(img,size, patchCorner=(0,0), patchDim=None, unskew=True):
"""
    Resizes, crops, and unskews images
"""
if patchDim == None: patchDim = size
nImg = np.shape(img)[0]
procImg = np.empty((nImg,size[0],size[1]))
# Unskew and Resize
if unskew == True:
for i in range(nImg):
procImg[i,:,:] = deskew(cv2.resize(img[i,:,:],size),size)
# Crop
cropImg = np.empty((nImg,patchDim[0],patchDim[1]))
for i in range(nImg):
cropImg[i,:,:] = procImg[i,patchCorner[0]:patchCorner[0]+patchDim[0],\
patchCorner[1]:patchCorner[1]+patchDim[1]]
procImg = cropImg
return procImg
def deskew(image, image_shape, negated=True):
"""
    This method deskews an image using moments
    :param image: a numpy nd array input image
    :param image_shape: a tuple denoting the image's shape
    :param negated: a boolean flag telling whether the input image is negated
    :returns: a numpy nd array deskewed image
source: https://github.com/vsvinayak/mnist-helper
"""
# negate the image
if not negated:
image = 255-image
# calculate the moments of the image
m = cv2.moments(image)
if abs(m['mu02']) < 1e-2:
return image.copy()
    # calculating the skew
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*image_shape[0]*skew], [0,1,0]])
img = cv2.warpAffine(image, M, image_shape, \
flags=cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR)
return img
| apache-2.0 |
rhiever/sklearn-benchmarks | model_code/random_search_preprocessing/SGDClassifier.py | 1 | 3065 | import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import Binarizer, MaxAbsScaler, MinMaxScaler
from sklearn.preprocessing import Normalizer, PolynomialFeatures, RobustScaler, StandardScaler
from sklearn.decomposition import FastICA, PCA
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import SelectFwe, SelectPercentile, VarianceThreshold
from sklearn.feature_selection import SelectFromModel, RFE
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.linear_model import SGDClassifier
from evaluate_model import evaluate_model
dataset = sys.argv[1]
num_param_combinations = int(sys.argv[2])
random_seed = int(sys.argv[3])
preprocessor_num = int(sys.argv[4])
np.random.seed(random_seed)
preprocessor_list = [Binarizer, MaxAbsScaler, MinMaxScaler, Normalizer,
PolynomialFeatures, RobustScaler, StandardScaler,
FastICA, PCA, RBFSampler, Nystroem, FeatureAgglomeration,
SelectFwe, SelectPercentile, VarianceThreshold,
SelectFromModel, RFE]
chosen_preprocessor = preprocessor_list[preprocessor_num]
pipeline_components = [chosen_preprocessor, SGDClassifier]
pipeline_parameters = {}
loss_values = np.random.choice(['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'], size=num_param_combinations)
penalty_values = np.random.choice(['l2', 'l1', 'elasticnet'], size=num_param_combinations)
alpha_values = np.random.exponential(scale=0.01, size=num_param_combinations)
learning_rate_values = np.random.choice(['constant', 'optimal', 'invscaling'], size=num_param_combinations)
fit_intercept_values = np.random.choice([True, False], size=num_param_combinations)
l1_ratio_values = np.random.uniform(low=0., high=1., size=num_param_combinations)
eta0_values = np.random.uniform(low=0., high=5., size=num_param_combinations)
power_t_values = np.random.uniform(low=0., high=5., size=num_param_combinations)
all_param_combinations = zip(loss_values, penalty_values, alpha_values, learning_rate_values, fit_intercept_values, l1_ratio_values, eta0_values, power_t_values)
pipeline_parameters[SGDClassifier] = \
[{'loss': loss, 'penalty': penalty, 'alpha': alpha, 'learning_rate': learning_rate, 'fit_intercept': fit_intercept,
'l1_ratio': 0.15 if penalty != 'elasticnet' else l1_ratio, 'eta0': 0. if learning_rate not in ['constant', 'invscaling'] else eta0,
'power_t': 0.5 if learning_rate != 'invscaling' else power_t, 'random_state': 324089}
for (loss, penalty, alpha, learning_rate, fit_intercept, l1_ratio, eta0, power_t) in all_param_combinations]
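# Editorial note: settings that scikit-learn would ignore are pinned to their
# defaults above -- l1_ratio only matters for penalty='elasticnet', eta0 only for
# the 'constant'/'invscaling' learning rates, and power_t only for 'invscaling' --
# so equivalent configurations collapse to identical parameter dicts.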
if chosen_preprocessor is SelectFromModel:
pipeline_parameters[SelectFromModel] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
elif chosen_preprocessor is RFE:
pipeline_parameters[RFE] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
| mit |
TinyOS-Camp/DDEA-DEV | Archive/[14_10_24] Demo_Version/radar_example.py | 10 | 1434 | import numpy as np
import matplotlib.pyplot as plt
import pprint
import mytool as mt
import radar_chart
# Load from binaries
avgsensor_names = mt.loadObjectBinary("tmp/avgsensor_names.bin")
Conditions_dict = mt.loadObjectBinary("tmp/Conditions_dict.bin")
Events_dict = mt.loadObjectBinary("tmp/Events_dict.bin")
wf_tuple_t = mt.loadObjectBinary("tmp/wf_tuple_t.bin")
wf_tuple_d = mt.loadObjectBinary("tmp/wf_tuple_d.bin")
wf_tuple_h = mt.loadObjectBinary("tmp/wf_tuple_h.bin")
wf_tuple_e = mt.loadObjectBinary("tmp/wf_tuple_e.bin")
wf_tuple_c = mt.loadObjectBinary("tmp/wf_tuple_c.bin")
sensor_no = len(avgsensor_names)
# convert 'inf' to 1
sen_t = [1 if val == float("inf") else val for val in wf_tuple_t[3]]
sen_d = [1 if val == float("inf") else val for val in wf_tuple_d[3]]
sen_h = [1 if val == float("inf") else val for val in wf_tuple_h[3]]
sen_e = [1 if val == float("inf") else val for val in wf_tuple_e[3]]
sen_c = [1 if val == float("inf") else val for val in wf_tuple_c[3]]
SEN = [[sen_t[i], sen_d[i], sen_h[i], sen_e[i], sen_c[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-6:] # Best 6 sensors
spoke_labels = ["Temperature", "Dew Point", "Humidity", "Events", "Conditions"]
data = [SEN[i] for i in idx]
sensor_labels = [avgsensor_names[i] for i in idx]
radar_chart.plot(data, spoke_labels, sensor_labels, saveto="radar.png")
# plt.show()
| gpl-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/util/doctools.py | 11 | 6612 | import numpy as np
import pandas as pd
import pandas.compat as compat
class TablePlotter(object):
"""
Layout some DataFrames in vertical/horizontal layout for explanation.
Used in merging.rst
"""
def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
def _shape(self, df):
"""Calcurate table chape considering index levels"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
def _get_cells(self, left, right, vertical):
"""Calcurate appropriate figure size based on left and right data"""
if vertical:
            # calculate required number of cells
vcells = max(sum([self._shape(l)[0] for l in left]), self._shape(right)[0])
hcells = max([self._shape(l)[1] for l in left]) + self._shape(right)[1]
else:
vcells = max([self._shape(l)[0] for l in left] + [self._shape(right)[0]])
hcells = sum([self._shape(l)[1] for l in left] + [self._shape(right)[1]])
return hcells, vcells
def plot(self, left, right, labels=None, vertical=True):
"""
Plot left / right DataFrames in specified layout.
Parameters
----------
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
vertical : bool
If True, use vertical layout. If False, use horizontal layout.
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if not isinstance(left, list):
left = [left]
left = [self._conv(l) for l in left]
right = self._conv(right)
hcells, vcells = self._get_cells(left, right, vertical)
if vertical:
figsize = self.cell_width * hcells, self.cell_height * vcells
else:
# include margin for titles
figsize = self.cell_width * hcells, self.cell_height * vcells
fig = plt.figure(figsize=figsize)
if vertical:
gs = gridspec.GridSpec(len(left), hcells)
# left
max_left_cols = max([self._shape(l)[1] for l in left])
max_left_rows = max([self._shape(l)[0] for l in left])
for i, (l, label) in enumerate(zip(left, labels)):
ax = fig.add_subplot(gs[i, 0:max_left_cols])
self._make_table(ax, l, title=label, height=1.0/max_left_rows)
# right
ax = plt.subplot(gs[:, max_left_cols:])
self._make_table(ax, right, title='Result', height=1.05/vcells)
fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
else:
max_rows = max([self._shape(df)[0] for df in left + [right]])
height = 1.0 / np.max(max_rows)
gs = gridspec.GridSpec(1, hcells)
# left
i = 0
for l, label in zip(left, labels):
sp = self._shape(l)
ax = fig.add_subplot(gs[0, i:i+sp[1]])
self._make_table(ax, l, title=label, height=height)
i += sp[1]
# right
ax = plt.subplot(gs[0, i:])
self._make_table(ax, right, title='Result', height=height)
fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
return fig
def _conv(self, data):
"""Convert each input to appropriate for table outplot"""
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name='')
else:
data = data.to_frame()
data = data.fillna('NaN')
return data
def _insert_index(self, data):
# insert is destructive
data = data.copy()
idx_nlevels = data.index.nlevels
if idx_nlevels == 1:
data.insert(0, 'Index', data.index)
else:
for i in range(idx_nlevels):
data.insert(i, 'Index{0}'.format(i), data.index.get_level_values(i))
col_nlevels = data.columns.nlevels
if col_nlevels > 1:
col = data.columns.get_level_values(0)
values = [data.columns.get_level_values(i).values for i in range(1, col_nlevels)]
col_df = pd.DataFrame(values)
data.columns = col_df.columns
data = pd.concat([col_df, data])
data.columns = col
return data
def _make_table(self, ax, df, title, height=None):
if df is None:
ax.set_visible(False)
return
import pandas.tools.plotting as plotting
idx_nlevels = df.index.nlevels
col_nlevels = df.columns.nlevels
# must be convert here to get index levels for colorization
df = self._insert_index(df)
tb = plotting.table(ax, df, loc=9)
tb.set_fontsize(self.font_size)
if height is None:
height = 1.0 / (len(df) + 1)
props = tb.properties()
for (r, c), cell in compat.iteritems(props['celld']):
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
cell.set_visible(False)
elif r < col_nlevels or c < idx_nlevels:
cell.set_facecolor('#AAAAAA')
cell.set_height(height)
ax.set_title(title, size=self.font_size)
ax.axis('off')
if __name__ == "__main__":
import pandas as pd
import matplotlib.pyplot as plt
p = TablePlotter()
df1 = pd.DataFrame({'A': [10, 11, 12],
'B': [20, 21, 22],
'C': [30, 31, 32]})
df2 = pd.DataFrame({'A': [10, 12],
'C': [30, 32]})
p.plot([df1, df2], pd.concat([df1, df2]),
labels=['df1', 'df2'], vertical=True)
plt.show()
df3 = pd.DataFrame({'X': [10, 12],
'Z': [30, 32]})
p.plot([df1, df3], pd.concat([df1, df3], axis=1),
labels=['df1', 'df2'], vertical=False)
plt.show()
idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
(2, 'A'), (2, 'B'), (2, 'C')])
col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
'v2': [5, 6, 7, 8, 9, 10]},
index=idx)
df3.columns = col
p.plot(df3, df3, labels=['df3'])
plt.show()
| apache-2.0 |
nmartensen/pandas | pandas/core/dtypes/generic.py | 9 | 3256 | """ define generic base classes for pandas objects """
# define abstract base classes to enable isinstance type checking on our
# objects
def create_pandas_abc_type(name, attr, comp):
@classmethod
def _check(cls, inst):
return getattr(inst, attr, '_typ') in comp
dct = dict(__instancecheck__=_check, __subclasscheck__=_check)
meta = type("ABCBase", (type, ), dct)
return meta(name, tuple(), dct)
ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index", ))
ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ",
("int64index", ))
ABCUInt64Index = create_pandas_abc_type("ABCUInt64Index", "_typ",
("uint64index", ))
ABCRangeIndex = create_pandas_abc_type("ABCRangeIndex", "_typ",
("rangeindex", ))
ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ",
("float64index", ))
ABCMultiIndex = create_pandas_abc_type("ABCMultiIndex", "_typ",
("multiindex", ))
ABCDatetimeIndex = create_pandas_abc_type("ABCDatetimeIndex", "_typ",
("datetimeindex", ))
ABCTimedeltaIndex = create_pandas_abc_type("ABCTimedeltaIndex", "_typ",
("timedeltaindex", ))
ABCPeriodIndex = create_pandas_abc_type("ABCPeriodIndex", "_typ",
("periodindex", ))
ABCCategoricalIndex = create_pandas_abc_type("ABCCategoricalIndex", "_typ",
("categoricalindex", ))
ABCIntervalIndex = create_pandas_abc_type("ABCIntervalIndex", "_typ",
("intervalindex", ))
ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ",
("index", "int64index", "rangeindex",
"float64index", "uint64index",
"multiindex", "datetimeindex",
"timedeltaindex", "periodindex",
"categoricalindex", "intervalindex"))
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series", ))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe", ))
ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel", "panel4d"))
ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp",
('sparse_series',
'sparse_time_series'))
ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp",
('sparse_array', 'sparse_series'))
ABCCategorical = create_pandas_abc_type("ABCCategorical", "_typ",
("categorical"))
ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period", ))
ABCDateOffset = create_pandas_abc_type("ABCDateOffset", "_typ",
("dateoffset",))
class _ABCGeneric(type):
def __instancecheck__(cls, inst):
return hasattr(inst, "_data")
ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {})
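# Usage sketch (illustrative): the generated ABCs support isinstance checks
# without importing the concrete pandas classes.
#     import pandas as pd
#     isinstance(pd.Series([1]), ABCSeries)  # True (dispatches on the _typ attribute)
#     isinstance(pd.DataFrame(), ABCSeries)  # False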
| bsd-3-clause |
chrisburr/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
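# Illustrative follow-up (not in the original example): the fitted model can be
# queried for new points; with XOR-distributed targets these two should fall in
# different classes.
#     clf.predict([[1.0, 1.0], [-1.0, 1.0]])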
| bsd-3-clause |
WZBSocialScienceCenter/pdftabextract | examples/schoollist_1/schoollist_1.py | 1 | 15786 | # -*- coding: utf-8 -*-
"""
An example script that shows how to extract tabular data from OCR-scanned *double* pages with lists of public
schools in Germany.
It includes the following stages:
1. Load the XML describing the pages and text boxes (the XML was generated from the OCR scanned PDF with poppler
utils (pdftohtml command))
2. Split the scanned double pages so that we can later process the lists page-by-page
3. Detect clusters of horizontal lines using the image processing module and repair rotated pages
4. Get column and line positions of all pages (for lines/rows using the detected horizontal lines and for columns
by analyzing the distribution of text box x-positions)
5. Create a grid of columns and lines for each page
6. Match the text boxes into the grid and hence extract the tabular data, storing it into a pandas DataFrame
Feb. 2017, WZB Berlin Social Science Center - https://wzb.eu
@author: Markus Konrad <[email protected]>
"""
import os
import re
from math import radians, degrees
import numpy as np
import pandas as pd
import cv2
from pdftabextract import imgproc
from pdftabextract.geom import pt
from pdftabextract.common import read_xml, parse_pages, save_page_grids
from pdftabextract.textboxes import rotate_textboxes, sorted_by_attr
from pdftabextract.clustering import (find_clusters_1d_break_dist,
calc_cluster_centers_1d,
zip_clusters_and_values)
from pdftabextract.splitpages import split_page_texts, create_split_pages_dict_structure
from pdftabextract.extract import make_grid_from_positions, fit_texts_into_grid, datatable_to_dataframe
#%% Some constants
DATAPATH = 'data/'
OUTPUTPATH = 'generated_output/'
INPUT_XML = 'schoollist_1.pdf.xml'
MIN_ROW_HEIGHT = 260 # <- very important. the minimum height of a row in pixels, measured in the scanned pages
MIN_COL_WIDTH = 194 # <- very important. the minimum width of a column in pixels, measured in the scanned pages
#%% Some helper functions
def save_image_w_lines(iproc_obj, imgfilebasename, orig_img_as_background, file_suffix_prefix=''):
file_suffix = 'lines-orig' if orig_img_as_background else 'lines'
img_lines = iproc_obj.draw_lines(orig_img_as_background=orig_img_as_background)
img_lines_file = os.path.join(OUTPUTPATH, '%s-%s.png' % (imgfilebasename, file_suffix_prefix + file_suffix))
print("> saving image with detected lines to '%s'" % img_lines_file)
cv2.imwrite(img_lines_file, img_lines)
#%% Read the XML
# Load the XML that was generated with pdftohtml
xmltree, xmlroot = read_xml(os.path.join(DATAPATH, INPUT_XML))
# parse it and generate a dict of pages
pages = parse_pages(xmlroot, require_image=True)
#%% Split the scanned double pages so that we can later process the lists page-by-page
split_texts_and_images = [] # list of tuples with (double page, split text boxes, split images)
for p_num, p in pages.items():
# get the image file of the scanned page
imgfilebasename = p['image'][:p['image'].rindex('.')]
imgfile = os.path.join(DATAPATH, p['image'])
print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))
# create an image processing object with the scanned page
iproc_obj = imgproc.ImageProc(imgfile)
# calculate the scaling of the image file in relation to the text boxes coordinate system dimensions
page_scaling_x = iproc_obj.img_w / p['width']
page_scaling_y = iproc_obj.img_h / p['height']
image_scaling = (page_scaling_x, # scaling in X-direction
page_scaling_y) # scaling in Y-direction
# detect the lines in the double pages
lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,
hough_rho_res=1,
hough_theta_res=np.pi/500,
hough_votes_thresh=350)
print("> found %d lines" % len(lines_hough))
save_image_w_lines(iproc_obj, imgfilebasename, True, 'bothpages-')
# find the vertical line that separates both sides
sep_line_img_x = iproc_obj.find_pages_separator_line(dist_thresh=MIN_COL_WIDTH/2)
sep_line_page_x = sep_line_img_x / page_scaling_x
print("> found pages separator line at %f (image space position) / %f (page space position)"
% (sep_line_img_x, sep_line_page_x))
# split the scanned double page at the separator line
split_images = iproc_obj.split_image(sep_line_img_x)
# split the textboxes at the separator line
split_texts = split_page_texts(p, sep_line_page_x)
split_texts_and_images.append((p, split_texts, split_images))
# generate a new XML and "pages" dict structure from the split pages
split_pages_xmlfile = os.path.join(OUTPUTPATH, INPUT_XML[:INPUT_XML.rindex('.')] + '.split.xml')
print("> saving split pages XML to '%s'" % split_pages_xmlfile)
split_tree, split_root, split_pages = create_split_pages_dict_structure(split_texts_and_images,
save_to_output_path=split_pages_xmlfile)
# we don't need the original double pages any more, we'll work with 'split_pages'
del pages
#%% Detect clusters of horizontal lines using the image processing module and rotate back or deskew pages
hori_lines_clusters = {}
pages_image_scaling = {} # scaling of the scanned page image in relation to the OCR page dimensions for each page
for p_num, p in split_pages.items():
# get the image file of the scanned page
imgfilebasename = p['image'][:p['image'].rindex('.')]
imgfile = os.path.join(OUTPUTPATH, p['image'])
print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))
# create an image processing object with the scanned page
iproc_obj = imgproc.ImageProc(imgfile)
# calculate the scaling of the image file in relation to the text boxes coordinate system dimensions
page_scaling_x = iproc_obj.img_w / p['width']
page_scaling_y = iproc_obj.img_h / p['height']
pages_image_scaling[p_num] = (page_scaling_x, # scaling in X-direction
page_scaling_y) # scaling in Y-direction
# detect the lines
lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,
hough_rho_res=1,
hough_theta_res=np.pi/500,
hough_votes_thresh=round(0.2 * iproc_obj.img_w))
print("> found %d lines" % len(lines_hough))
save_image_w_lines(iproc_obj, imgfilebasename, True)
save_image_w_lines(iproc_obj, imgfilebasename, False)
# find rotation or skew
# the parameters are:
# 1. the minimum threshold in radians for a rotation to be counted as such
# 2. the maximum threshold for the difference between horizontal and vertical line rotation (to detect skew)
# 3. an optional threshold to filter out "stray" lines whose angle is too far apart from the median angle of
# all other lines that go in the same direction (no effect here)
rot_or_skew_type, rot_or_skew_radians = iproc_obj.find_rotation_or_skew(radians(0.5), # uses "lines_hough"
radians(1),
omit_on_rot_thresh=radians(0.5))
# rotate back text boxes
# since often no vertical lines can be detected and hence it cannot be determined if the page is rotated or skewed,
# we assume that it's always rotated
if rot_or_skew_type is not None:
print("> rotating back by %f°" % -degrees(rot_or_skew_radians))
rotate_textboxes(p, -rot_or_skew_radians, pt(0, 0))
# rotate back detected lines
lines_hough = iproc_obj.apply_found_rotation_or_skew(rot_or_skew_type, -rot_or_skew_radians)
save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', True)
save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', False)
# cluster the detected *horizontal* lines using find_clusters_1d_break_dist as simple clustering function
# (break on distance MIN_ROW_HEIGHT/2)
# additionally, remove all cluster sections that are considered empty
# a cluster is considered empty when the number of text boxes in it is below 10% of the median number of text boxes
# per cluster section
hori_clusters = iproc_obj.find_clusters(imgproc.DIRECTION_HORIZONTAL, find_clusters_1d_break_dist,
remove_empty_cluster_sections_use_texts=p['texts'], # use this page's textboxes
remove_empty_cluster_sections_n_texts_ratio=0.1, # 10% rule
remove_empty_cluster_sections_scaling=page_scaling_y, # the positions are in "scanned image space" -> we scale them to "text box space"
dist_thresh=MIN_ROW_HEIGHT/2)
print("> found %d clusters" % len(hori_clusters))
if len(hori_clusters) > 0:
# draw the clusters
img_w_clusters = iproc_obj.draw_line_clusters(imgproc.DIRECTION_HORIZONTAL, hori_clusters)
save_img_file = os.path.join(OUTPUTPATH, '%s-hori-clusters.png' % imgfilebasename)
print("> saving image with detected horizontal clusters to '%s'" % save_img_file)
cv2.imwrite(save_img_file, img_w_clusters)
hori_lines_clusters[p_num] = hori_clusters
else:
print("> no horizontal line clusters found")
# save split and repaired XML (i.e. XML with deskewed textbox positions)
output_files_basename = INPUT_XML[:INPUT_XML.rindex('.')]
repaired_xmlfile = os.path.join(OUTPUTPATH, output_files_basename + '.split.repaired.xml')
print("saving split and repaired XML file to '%s'..." % repaired_xmlfile)
split_tree.write(repaired_xmlfile)
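# Editorial aside: the 1D clustering used here (find_clusters_1d_break_dist)
# groups positions whose gaps to their neighbours stay below dist_thresh, i.e.
# detected line y-centers such as [990, 998, 1004, 1560, 1566] with
# dist_thresh=MIN_ROW_HEIGHT/2 form two clusters (around ~997 and ~1563); their
# centers become row boundaries after dividing by the image-to-page scaling.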
#%% Determine the rows and columns of the tables
pttrn_schoolnum = re.compile(r'^\d{6}$') # a valid school number indicates a table row
page_grids = {}
print("detecting rows and columns...")
for p_num, p in split_pages.items():
scaling_x, scaling_y = pages_image_scaling[p_num]
# try to find out the table rows in this page using the horizontal lines that were detected before
hori_lines = list(np.array(calc_cluster_centers_1d(hori_lines_clusters[p_num])) / scaling_y)
hori_lines.append(p['height']) # last line: page bottom
prev_line_y = 0
row_texts = []
row_positions = []
in_table = False # is True when the current segment is a real table row (not a table header or surrounding text)
for line_y in hori_lines:
# get all texts in this row
segment_texts = [t for t in p['texts'] if prev_line_y < t['bottom'] <= line_y]
if not segment_texts: continue # skip empty rows
# try to find the start and the end of the table
for t in segment_texts:
t_val = t['value'].strip()
if pttrn_schoolnum.search(t_val): # if this matches, we found the start of the table
if not in_table:
in_table = True
row_positions.append(prev_line_y)
break
else:
if in_table: # we found the end of the table
in_table = False
if in_table: # this is a table row, so add the texts and row positions to the respective lists
row_texts.append(segment_texts)
row_positions.append(line_y)
prev_line_y = line_y
# try to find out the table columns in this page using the distribution of x-coordinates of the left position of
# each text box in all rows
text_xs = []
for texts in row_texts:
text_xs.extend([t['left'] for t in texts])
text_xs = np.array(text_xs)
# make clusters of x positions
text_xs_clusters = find_clusters_1d_break_dist(text_xs, dist_thresh=MIN_COL_WIDTH/2/scaling_x)
text_xs_clusters_w_values = zip_clusters_and_values(text_xs_clusters, text_xs)
col_positions = calc_cluster_centers_1d(text_xs_clusters_w_values)
# remove falsely identified columns (i.e. merge columns with only a few text boxes)
filtered_col_positions = []
n_rows = len(row_positions)
n_cols = len(col_positions)
if n_cols > 1 and n_rows > 1:
top_y = row_positions[0]
bottom_y = row_positions[-1]
# append the rightmost text's right border as the last column border
rightmost_pos = sorted_by_attr(p['texts'], 'right')[-1]['right']
col_positions.append(rightmost_pos)
# merge columns with few text boxes
texts_in_table = [t for t in p['texts'] if top_y < t['top'] + t['height']/2 <= bottom_y]
prev_col_x = col_positions[0]
for col_x in col_positions[1:]:
col_texts = [t for t in texts_in_table if prev_col_x < t['left'] + t['width']/2 <= col_x]
if len(col_texts) >= n_rows: # there should be at least one text box per row
filtered_col_positions.append(prev_col_x)
last_col_x = col_x
prev_col_x = col_x
# manually add border for the last column because it has very few or no text boxes
filtered_col_positions.append(filtered_col_positions[-1] + (rightmost_pos - filtered_col_positions[-1]) / 2)
filtered_col_positions.append(rightmost_pos)
# create the grid
if filtered_col_positions:
grid = make_grid_from_positions(filtered_col_positions, row_positions)
n_rows = len(grid)
n_cols = len(grid[0])
print("> page %d: grid with %d rows, %d columns" % (p_num, n_rows, n_cols))
page_grids[p_num] = grid
else: # this happens for the first page as there's no table on that
print("> page %d: no table found" % p_num)
# save the page grids
# After you created the page grids, you should then check that they're correct using pdf2xml-viewer's
# loadGridFile() function
page_grids_file = os.path.join(OUTPUTPATH, output_files_basename + '.pagegrids.json')
print("saving page grids JSON file to '%s'" % page_grids_file)
save_page_grids(page_grids, page_grids_file)
#%% Create data frames (requires pandas library)
# For sake of simplicity, we will just fit the text boxes into the grid, merge the texts in their cells (splitting text
# boxes to separate lines if necessary) and output the result. Normally, you would do some more parsing here, e.g.
# extracting the address components from the second column.
full_df = pd.DataFrame()
print("fitting text boxes into page grids and generating final output...")
for p_num, p in split_pages.items():
if p_num not in page_grids: continue # happens when no table was detected
print("> page %d" % p_num)
datatable, unmatched_texts = fit_texts_into_grid(p['texts'], page_grids[p_num], return_unmatched_texts=True)
df = datatable_to_dataframe(datatable, split_texts_in_lines=True)
df['from_page'] = p_num
full_df = full_df.append(df, ignore_index=True)
print("extracted %d rows from %d pages" % (len(full_df), len(split_pages)))
csv_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.csv')
print("saving extracted data to '%s'" % csv_output_file)
full_df.to_csv(csv_output_file, index=False)
excel_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.xlsx')
print("saving extracted data to '%s'" % excel_output_file)
full_df.to_excel(excel_output_file, index=False)
| apache-2.0 |
Vishruit/DDP_models | data_extraction_code/ddp-5feb-dataprocess.py | 1 | 22058 | # IPython log file
run dataExtractionAutomator.py
run dataExtractionAutomator.py
import h5py
ls
cd E
cd Extracted\ Data
ls
np.load(we.npy)
np.load('we.npy')
#[Out]# array(['qwerty', 'asdfgh'],
#[Out]# dtype='|S6')
t = np.load('we.npy')
t
#[Out]# array(['qwerty', 'asdfgh'],
#[Out]# dtype='|S6')
type(t)
#[Out]# <type 'numpy.ndarray'>
type(t[:])
#[Out]# <type 'numpy.ndarray'>
type(t[:2])
#[Out]# <type 'numpy.ndarray'>
type(t[:3])
#[Out]# <type 'numpy.ndarray'>
f = h5py.File('we.npy', 'w')
f
#[Out]# <HDF5 file "we.npy" (mode r+)>
f[1]
f[:]
logstart?
logstart?
logstart?
# Sun, 05 Feb 2017 15:30:27
logstart -o -r -t ddp-5feb-dataprocess.py append
# Sun, 05 Feb 2017 15:30:33
ls
# Sun, 05 Feb 2017 15:30:38
ls -l
# Sun, 05 Feb 2017 15:30:47
!head
# Sun, 05 Feb 2017 15:31:09
ls
# Sun, 05 Feb 2017 15:31:17
!head ddp-5feb-dataprocess.py
# Sun, 05 Feb 2017 15:33:09
import h5py
# Sun, 05 Feb 2017 15:33:35
history
# Sun, 05 Feb 2017 15:34:07
f
#[Out]# <HDF5 file "we.npy" (mode r+)>
# Sun, 05 Feb 2017 15:34:12
f.filename
#[Out]# u'we.npy'
# Sun, 05 Feb 2017 15:34:24
f.items
#[Out]# <bound method File.items of <HDF5 file "we.npy" (mode r+)>>
# Sun, 05 Feb 2017 15:34:29
ls
# Sun, 05 Feb 2017 16:13:02
def saveNPArray(data, filename, iter):
global file_Location_npy
filename = file_Location_npy[iter] + '\\' + filename
ensure_dir(filename)
np.save(filename,data)
pass
# Sun, 05 Feb 2017 16:13:22
saveNPArray([2 2], 'qw')
# Sun, 05 Feb 2017 16:13:32
t = [2 2]
# Sun, 05 Feb 2017 16:13:35
t = [2 ,2]
# Sun, 05 Feb 2017 16:13:40
saveNPArray(t, 'qw')
# Sun, 05 Feb 2017 16:14:11
def saveHDF5Array(data, filename, iter, f=None):
global file_Location_npy
filename = file_Location_npy[iter] + '\\' + filename
if !ensure_dir(filename):
f = h5py.File(filename, 'w')
return f
dataset = f.create_dataset("data", data = data)
# np.save(filename,data)
pass
# Sun, 05 Feb 2017 16:14:15
saveNPArray(t, 'qw')
# Sun, 05 Feb 2017 16:14:46
def saveHDF5Array(data, filename, iter, f=None):
global file_Location_npy
filename = file_Location_npy[iter] + '\\' + filename
if !ensure_dir(filename):
f = h5py.File(filename, 'w')
return f
dataset = f.create_dataset("data", data = data)
# np.save(filename,data)
pass
# Sun, 05 Feb 2017 16:15:02
def saveHDF5Array(data, filename, iter, f=None):
global file_Location_npy
filename = file_Location_npy[iter] + '\\' + filename
if not ensure_dir(filename):
f = h5py.File(filename, 'w')
return f
dataset = f.create_dataset("data", data = data)
# np.save(filename,data)
pass
# Sun, 05 Feb 2017 16:15:12
saveNPArray(t, 'qw')
# Sun, 05 Feb 2017 16:15:17
saveNPArray(t, 'qw',t)
# Sun, 05 Feb 2017 16:15:37
saveNPArray(t, 'qw',1)
# Sun, 05 Feb 2017 16:15:45
ls
# Sun, 05 Feb 2017 16:16:02
filename
# Sun, 05 Feb 2017 16:17:55
f
#[Out]# <HDF5 file "we.npy" (mode r+)>
# Sun, 05 Feb 2017 16:18:09
f = saveNPArray(t, 'qw',1)
# Sun, 05 Feb 2017 16:18:11
f
# Sun, 05 Feb 2017 16:18:19
type(f)
#[Out]# NoneType
# Sun, 05 Feb 2017 16:25:19
t
#[Out]# [2, 2]
# Sun, 05 Feb 2017 16:25:26
t = [t 4]
# Sun, 05 Feb 2017 16:25:34
t = t + [3 4]
# Sun, 05 Feb 2017 16:25:37
t = t + [3,4]
# Sun, 05 Feb 2017 16:25:40
t
#[Out]# [2, 2, 3, 4]
# Sun, 05 Feb 2017 16:25:52
t = t;[34]
#[Out]# [34]
# Sun, 05 Feb 2017 16:25:54
t
#[Out]# [2, 2, 3, 4]
# Sun, 05 Feb 2017 16:26:00
[3]
#[Out]# [3]
# Sun, 05 Feb 2017 16:26:20
t = [t; 1 ,2 ,3 ,4]
# Sun, 05 Feb 2017 16:26:22
t
#[Out]# [2, 2, 3, 4]
# Sun, 05 Feb 2017 16:26:56
t.append([1 2 3 4])
# Sun, 05 Feb 2017 16:27:02
t.append([1 ,2 ,3, 4])
# Sun, 05 Feb 2017 16:27:03
t
#[Out]# [2, 2, 3, 4, [1, 2, 3, 4]]
# Sun, 05 Feb 2017 16:27:16
t= []
# Sun, 05 Feb 2017 16:27:26
t.append([2,2])
# Sun, 05 Feb 2017 16:27:28
t.append([2,4])
# Sun, 05 Feb 2017 16:27:31
t.append([2,5])
# Sun, 05 Feb 2017 16:27:33
t
#[Out]# [[2, 2], [2, 4], [2, 5]]
# Sun, 05 Feb 2017 16:27:35
t[1]
#[Out]# [2, 4]
# Sun, 05 Feb 2017 16:27:40
t[0]
#[Out]# [2, 2]
# Sun, 05 Feb 2017 16:28:25
ls
# Sun, 05 Feb 2017 16:28:41
f = h5py.File('qwert.h5', 'w')
# Sun, 05 Feb 2017 16:28:43
ls
# Sun, 05 Feb 2017 16:28:51
t
#[Out]# [[2, 2], [2, 4], [2, 5]]
# Sun, 05 Feb 2017 16:28:59
dataset = f.create_dataset("data", data = t)
# Sun, 05 Feb 2017 16:29:04
dataset
#[Out]# <HDF5 dataset "data": shape (3, 2), type "<i4">
# Sun, 05 Feb 2017 16:29:50
f
#[Out]# <HDF5 file "qwert.h5" (mode r+)>
# Sun, 05 Feb 2017 16:29:53
f.
# Sun, 05 Feb 2017 16:30:26
f['data']
#[Out]# <HDF5 dataset "data": shape (3, 2), type "<i4">
# Sun, 05 Feb 2017 16:30:32
f['/data']
#[Out]# <HDF5 dataset "data": shape (3, 2), type "<i4">
# Sun, 05 Feb 2017 16:31:50
hdf5dump
# Sun, 05 Feb 2017 16:32:18
h5py.Dataset
#[Out]# h5py._hl.dataset.Dataset
# Sun, 05 Feb 2017 16:33:06
dataset.value
#[Out]# array([[2, 2],
#[Out]# [2, 4],
#[Out]# [2, 5]])
# Sun, 05 Feb 2017 16:42:44
f('data')
# Sun, 05 Feb 2017 16:43:02
f['data']
#[Out]# <HDF5 dataset "data": shape (3, 2), type "<i4">
# Sun, 05 Feb 2017 16:43:31
f['data'].append([4,4])
# Sun, 05 Feb 2017 16:43:44
x = f['data']
# Sun, 05 Feb 2017 16:43:48
type(x)
#[Out]# h5py._hl.dataset.Dataset
# Sun, 05 Feb 2017 16:45:24
f['data'].keys()
# Sun, 05 Feb 2017 16:45:37
f
#[Out]# <HDF5 file "qwert.h5" (mode r+)>
# Sun, 05 Feb 2017 16:45:48
f.keys()
#[Out]# [u'data']
# Sun, 05 Feb 2017 16:47:10
t = [[2,3;2,3],[2,4;4,5]]
# Sun, 05 Feb 2017 16:47:13
t
#[Out]# [[2, 2], [2, 4], [2, 5]]
# Sun, 05 Feb 2017 16:48:14
t = [[[2,3],[2,3]],[[2,4],[4,5]]]
# Sun, 05 Feb 2017 16:48:15
t
#[Out]# [[[2, 3], [2, 3]], [[2, 4], [4, 5]]]
# Sun, 05 Feb 2017 16:48:20
t.shap
# Sun, 05 Feb 2017 16:48:23
t.shape()
# Sun, 05 Feb 2017 16:48:24
t.shape
# Sun, 05 Feb 2017 16:49:06
t = [[[2,3][2,3]],[[2,4][4,5]]]
# Sun, 05 Feb 2017 16:49:11
t = [[[2,3] [2,3]],[[2,4] [4,5]]]
# Sun, 05 Feb 2017 16:52:15
dataset = f.create_dataset("data", data = t)
# Sun, 05 Feb 2017 16:57:57
dataset = f.f.\\\\\\\ ("data", data = t)
# Sun, 05 Feb 2017 16:58:01
f.close
#[Out]# <bound method File.close of <HDF5 file "qwert.h5" (mode r+)>>
# Sun, 05 Feb 2017 16:58:04
f.close()
# Sun, 05 Feb 2017 16:58:06
f
#[Out]# <Closed HDF5 file>
# Sun, 05 Feb 2017 16:58:16
ls
# Sun, 05 Feb 2017 16:58:30
f = h5py.File(filename, 'a')
# Sun, 05 Feb 2017 16:58:38
f = h5py.File('qwert.h5', 'w')
# Sun, 05 Feb 2017 16:58:49
dataset = f.create_dataset("data", data = data)
# Sun, 05 Feb 2017 16:58:54
dataset = f.create_dataset("data", data = t)
# Sun, 05 Feb 2017 16:58:55
dataset = f.create_dataset("data", data = t)
# Sun, 05 Feb 2017 16:59:07
f
#[Out]# <HDF5 file "qwert.h5" (mode r+)>
# Sun, 05 Feb 2017 16:59:13
f.keys
#[Out]# <bound method File.keys of <HDF5 file "qwert.h5" (mode r+)>>
# Sun, 05 Feb 2017 16:59:16
f.keys()
#[Out]# [u'data']
# Sun, 05 Feb 2017 16:59:27
f.close()
# Sun, 05 Feb 2017 17:00:06
!h5dump qwert.h5
# Sun, 05 Feb 2017 17:25:49
a = np.random.random(size=(2,2,2))
# Sun, 05 Feb 2017 17:25:50
a
#[Out]# array([[[ 0.65355133, 0.86820977],
#[Out]# [ 0.89261818, 0.55109183]],
#[Out]#
#[Out]# [[ 0.55809384, 0.06275186],
#[Out]# [ 0.7018475 , 0.67516623]]])
# Sun, 05 Feb 2017 17:26:04
a = np.random.random(size=(2,3,4))
# Sun, 05 Feb 2017 17:26:06
a
#[Out]# array([[[ 0.6743129 , 0.2235317 , 0.36808721, 0.79703754],
#[Out]# [ 0.33636327, 0.11517274, 0.45340833, 0.89546714],
#[Out]# [ 0.2575046 , 0.04164981, 0.66210853, 0.09263139]],
#[Out]#
#[Out]# [[ 0.13964075, 0.89193979, 0.71639801, 0.72148317],
#[Out]# [ 0.40059789, 0.33611509, 0.86973319, 0.95078016],
#[Out]# [ 0.32503119, 0.92155489, 0.5659259 , 0.5286179 ]]])
# Sun, 05 Feb 2017 17:26:32
a[1,:,:]
#[Out]# array([[ 0.13964075, 0.89193979, 0.71639801, 0.72148317],
#[Out]# [ 0.40059789, 0.33611509, 0.86973319, 0.95078016],
#[Out]# [ 0.32503119, 0.92155489, 0.5659259 , 0.5286179 ]])
# Sun, 05 Feb 2017 17:27:02
h5f = h5py.File('data.h5', 'a')
# Sun, 05 Feb 2017 17:27:03
ls
# Sun, 05 Feb 2017 17:27:33
h5f.create_dataset('dataset_1', data=a)
#[Out]# <HDF5 dataset "dataset_1": shape (2, 3, 4), type "<f8">
# Sun, 05 Feb 2017 17:28:20
a
#[Out]# array([[[ 0.6743129 , 0.2235317 , 0.36808721, 0.79703754],
#[Out]# [ 0.33636327, 0.11517274, 0.45340833, 0.89546714],
#[Out]# [ 0.2575046 , 0.04164981, 0.66210853, 0.09263139]],
#[Out]#
#[Out]# [[ 0.13964075, 0.89193979, 0.71639801, 0.72148317],
#[Out]# [ 0.40059789, 0.33611509, 0.86973319, 0.95078016],
#[Out]# [ 0.32503119, 0.92155489, 0.5659259 , 0.5286179 ]]])
# Sun, 05 Feb 2017 17:29:39
print(h5f.shape)
# Sun, 05 Feb 2017 17:31:13
h = h5f.create_dataset('dataset_1', data=a)
# Sun, 05 Feb 2017 17:31:58
h = h5f['data']
# Sun, 05 Feb 2017 17:32:09
h = h5f['dataset1']
# Sun, 05 Feb 2017 17:32:24
h5f.close()
# Sun, 05 Feb 2017 17:32:54
!h5dump data.h5
# Sun, 05 Feb 2017 17:33:21
h5f = h5py.File('data.h5', 'a')
# Sun, 05 Feb 2017 17:33:27
h = h5f.create_dataset('dataset_1', data=a)
# Sun, 05 Feb 2017 17:33:33
h = h5f.create_dataset('dataset_2', data=a)
# Sun, 05 Feb 2017 17:33:39
h.shape
#[Out]# (2, 3, 4)
# Sun, 05 Feb 2017 17:34:15
h.resize(1,2,12)
# Sun, 05 Feb 2017 17:34:23
h.resize(1,2,2)
# Sun, 05 Feb 2017 17:34:26
h.resize(1,2)
# Sun, 05 Feb 2017 17:42:16
import numpy as np
# Sun, 05 Feb 2017 17:42:17
from pandas importHDFStore,DataFrame# create (or open) an hdf5 file and opens in append mode
# Sun, 05 Feb 2017 17:42:18
hdf =HDFStore('storage.h5')
# Sun, 05 Feb 2017 17:42:29
from pandas importHDFStore,DataFrame
# Sun, 05 Feb 2017 17:42:34
from pandas importHDFStore, DataFrame
# Sun, 05 Feb 2017 17:42:42
from pandas import HDFStore,DataFrame
# Sun, 05 Feb 2017 17:43:13
hdf =HDFStore('storage.h5')
# Sun, 05 Feb 2017 17:43:15
ls
# Sun, 05 Feb 2017 17:48:06
df =DataFrame(np.random.rand(5,3), columns=('A','B','C'))
# Sun, 05 Feb 2017 17:48:08
df
#[Out]# A B C
#[Out]# 0 0.333166 0.092269 0.819371
#[Out]# 1 0.233245 0.649195 0.718987
#[Out]# 2 0.331667 0.237529 0.781798
#[Out]# 3 0.324188 0.452287 0.811880
#[Out]# 4 0.239229 0.153740 0.373840
# Sun, 05 Feb 2017 17:48:14
df =DataFrame(np.random.rand(5,3))
# Sun, 05 Feb 2017 17:48:16
df
#[Out]# 0 1 2
#[Out]# 0 0.612967 0.625579 0.176081
#[Out]# 1 0.795840 0.190902 0.055109
#[Out]# 2 0.307932 0.688888 0.057545
#[Out]# 3 0.823256 0.284031 0.398731
#[Out]# 4 0.449865 0.412283 0.823011
# Sun, 05 Feb 2017 17:48:25
df =DataFrame(np.random.rand(5,3,1))
# Sun, 05 Feb 2017 18:17:02
dset = f.create_dataset("unlimited", (10, 10), maxshape=(None, 10))
# Sun, 05 Feb 2017 18:17:04
f
#[Out]# <Closed HDF5 file>
# Sun, 05 Feb 2017 18:17:38
dset = hf5.create_dataset("unlimited", (10, 10, 10), maxshape=(None, 10.10))
# Sun, 05 Feb 2017 18:17:58
hdf
#[Out]# <class 'pandas.io.pytables.HDFStore'>
#[Out]# File path: storage.h5
#[Out]# Empty
# Sun, 05 Feb 2017 18:21:49
np.arange(10)
#[Out]# array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# Sun, 05 Feb 2017 18:25:16
f = File('foo.h5', 'w')
# Sun, 05 Feb 2017 18:25:22
f['data'] = np.ones((4, 3, 2), 'f')
# Sun, 05 Feb 2017 18:25:25
f
#[Out]# <Closed HDF5 file>
# Sun, 05 Feb 2017 18:34:02
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 18:35:48
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 18:36:23
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 18:38:28
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 18:38:47
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 18:39:47
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:20:37
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:21:15
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:21:23
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:21:36
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:24:28
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:24:33
f
#[Out]# <Closed HDF5 file>
# Sun, 05 Feb 2017 20:24:42
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:25:55
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:26:13
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:26:34
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:27:28
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:28:04
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:30:01
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:30:57
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:32:37
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:33:14
run dataExtractionAutomator.py
# Sun, 05 Feb 2017 20:35:04
# This file automatically finds all the .ptw files in a given directory and
# Sun, 05 Feb 2017 20:35:05
# converts them to a numpy array for research purposes. It bypasses the need of
# Sun, 05 Feb 2017 20:35:05
# a proprietary software like ALTAIR to extract and read the data.
# Sun, 05 Feb 2017 20:35:05
'''
Before running this file perform 'pip install pyradi'
Please run the test file before running this file and follow the below
instructions to remove the error in the pyradi package, if any.
Comment out the line 516 in the file 'ryptw.py'
Header.h_Framatone = ord(headerinfo[3076:3077])
This ensures smooth running as required for this program.
'''
#[Out]# "\nBefore running this file perform 'pip install pyradi'\nPlease run the test file before running this file and follow the below\ninstructions to remove the error in the pyradi package, if any.\n\nComment out the line 516 in the file 'ryptw.py'\nHeader.h_Framatone = ord(headerinfo[3076:3077])\nThis ensures smooth running as required for this program.\n"
# Sun, 05 Feb 2017 20:35:05
# from IPython.display import display
# Sun, 05 Feb 2017 20:35:06
# from IPython.display import Image
# Sun, 05 Feb 2017 20:35:06
# from IPython.display import HTML
# Sun, 05 Feb 2017 20:35:06
#make pngs at 150 dpi
# Sun, 05 Feb 2017 20:35:06
'''
import matplotlib as mpl
mpl.rc("savefig", dpi=75)
mpl.rc('figure', figsize=(10,8))
'''
#[Out]# '\nimport matplotlib as mpl\nmpl.rc("savefig", dpi=75)\nmpl.rc(\'figure\', figsize=(10,8))\n'
# Sun, 05 Feb 2017 20:35:06
import numpy as np
# Sun, 05 Feb 2017 20:35:06
import numpy as np
# Sun, 05 Feb 2017 20:35:06
from pandas import HDFStore,DataFrame # create (or open) an hdf5 file and opens in append mode
# Sun, 05 Feb 2017 20:35:06
import os
# Sun, 05 Feb 2017 20:35:07
import pyradi.ryptw as ryptw
# Sun, 05 Feb 2017 20:35:07
import pyradi.ryplot as ryplot
# Sun, 05 Feb 2017 20:35:07
import pyradi.ryfiles as ryfiles
# Sun, 05 Feb 2017 20:35:08
def getPTWFilePaths(directory,excludeFiles):
file_paths = []
file_name = []
file_loc = []
for (root, directories, files) in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
fileloc = os.path.join(root)
if filename.endswith(".ptw") and filename!=excludeFiles:
file_paths.append(filepath) # Add it to the list.
file_name.append(filename)
file_loc.append(fileloc)
# break #To do a deep search in all the sub-directories
return file_paths, file_name, file_loc
# Sun, 05 Feb 2017 20:35:08
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
return 0
return 1
# Sun, 05 Feb 2017 20:35:09
def saveHDF5Array(data, filename, iter, f=None):
global file_Location_npy
filename = file_Location_npy[iter] + '\\' + filename
if not ensure_dir(filename):
f = h5py.File(filename, 'a')
dataset = f.create_dataset("data", data = data)
return f
# Sun, 05 Feb 2017 20:35:09
# dataset = f.create_dataset("data", data = data)
# Sun, 05 Feb 2017 20:35:09
# np.save(filename,data)
# Sun, 05 Feb 2017 20:35:09
pass
# Sun, 05 Feb 2017 20:35:10
def savePandasHDF5Array(data, filename, iter, f=None):
global file_Location_npy
filename = file_Location_npy[iter] + '\\' + filename
if not ensure_dir(filename):
# hdf = HDFStore('storage.h5')
hdf = HDFStore(filename)
# f = h5py.File(filename, 'a')
# dataset = f.create_dataset("data", data = data)
return f
# Sun, 05 Feb 2017 20:35:10
df =DataFrame(np.random.rand(5,3), columns=('A','B','C'))# put the dataset in the storage
# Sun, 05 Feb 2017 20:35:10
hdf.put('d1', df, format='table', data_columns=True)
# Sun, 05 Feb 2017 20:35:11
# dataset = f.create_dataset("data", data = data)
# Sun, 05 Feb 2017 20:35:11
# np.save(filename,data)
# Sun, 05 Feb 2017 20:35:11
pass
# Sun, 05 Feb 2017 20:35:11
def saveNPArray(data, filename, iter):
global file_Location_npy
filename = file_Location_npy[iter] + '\\' + filename
ensure_dir(filename)
np.save(filename,data)
pass
# Sun, 05 Feb 2017 20:35:12
def saveJPGPics(frames, filename, iter):
global file_Location_npy
filename = file_Location_jpg[iter] + '\\' + filename + '\\frame_'
ensure_dir(filename)
global ext
# filename = file_Location_jpg[iter] + 'frame'
i=0
for frame in frames:
i+=1
ryfiles.rawFrameToImageFile(frame, filename+str(i)+ext)
# np.save(filename,frame)
pass
# Sun, 05 Feb 2017 20:35:13
def autoPTW2NP(ptwfilepath, ptwfilename, iter):
# ptwfile = './PyradiSampleLWIR.ptw'
# outfilename = 'PyradiSampleLWIR.txt'
header = ryptw.readPTWHeader(ptwfilepath)
rows = header.h_Rows
cols = header.h_Cols
# ryptw.showHeader(header) # Suppressed Output
# numFrames = header.h_lastframe # Total frames in the video
numFrames = 100 # Testing time
framesToLoad = range(1, numFrames+1, 1)
frames = len(framesToLoad)
data, fheaders = ryptw.getPTWFrame (header, framesToLoad[0])
#f = saveHDF5Array(frame, ptwfilename, iter)
for frame in framesToLoad[1:]:
f, fheaders = (ryptw.getPTWFrame (header, frame))
        data = np.concatenate((data, f))
        print frame
        # saveNPArray(frame, ptwfilename, iter)
        # saveJPGPics(frame, ptwfilename, iter)
    print data.shape
# Sun, 05 Feb 2017 20:35:13
img = data.reshape(frames, rows ,cols)
# Sun, 05 Feb 2017 20:35:13
print(img.shape)
# Sun, 05 Feb 2017 20:35:14
# saveNPArray(img, ptwfilename, iter)
# Sun, 05 Feb 2017 20:35:14
saveJPGPics(img, ptwfilename, iter)
# Sun, 05 Feb 2017 20:35:14
return data
# Sun, 05 Feb 2017 20:35:14
excludeFiles = '1_0.5ws_4wfr_18lpm.ptw'
# Sun, 05 Feb 2017 20:35:14
# saveFolderLocation = './Extracted Data/'
# Sun, 05 Feb 2017 20:35:14
# Save format for Image
# Sun, 05 Feb 2017 20:35:14
ext = '.png'
# Sun, 05 Feb 2017 20:35:15
# Root Location for the original data
# Sun, 05 Feb 2017 20:35:15
dataLocation = 'F:\\Vishruit\\DATA'
# Sun, 05 Feb 2017 20:35:15
# Home location for saving the format file
# Sun, 05 Feb 2017 20:35:15
data_npyLocation = dataLocation + '_npy'
# Sun, 05 Feb 2017 20:35:15
data_jpgLocation = dataLocation + '_jpg'
# Sun, 05 Feb 2017 20:35:15
# Actual filepaths and filenames list
# Sun, 05 Feb 2017 20:35:15
[file_paths, file_names, file_locs] = getPTWFilePaths(dataLocation, excludeFiles)
# Sun, 05 Feb 2017 20:35:15
# Creating filepaths for desired file format
# Sun, 05 Feb 2017 20:35:16
# Also creating fileLocation paths
# Sun, 05 Feb 2017 20:35:16
file_paths_npy = []
# Sun, 05 Feb 2017 20:35:16
file_paths_jpg = []
# Sun, 05 Feb 2017 20:35:16
file_Location_npy = []
# Sun, 05 Feb 2017 20:35:16
file_Location_jpg = []
# Sun, 05 Feb 2017 20:35:17
for file_path in file_paths:
file_paths_npy.append( data_npyLocation + file_path[len(dataLocation):len(file_path)] )
file_paths_jpg.append( data_jpgLocation + file_path[len(dataLocation):len(file_path)] )
# Sun, 05 Feb 2017 20:35:17
# Save folder locations
# Sun, 05 Feb 2017 20:35:17
for file_loc in file_locs:
file_Location_npy.append( data_npyLocation + file_loc[len(dataLocation):len(file_loc)] )
file_Location_jpg.append( data_jpgLocation + file_loc[len(dataLocation):len(file_loc)] )
# Sun, 05 Feb 2017 20:35:17
print file_paths[1]
# Sun, 05 Feb 2017 20:35:17
for iter in range(len(file_paths) - 202): # len(file_paths)
autoPTW2NP(file_paths[iter], file_names[iter], iter)
# Sun, 05 Feb 2017 20:35:36
'''
filename = "/my/directory/filename.txt"
dir = os.path.dirname(filename)
'''
#[Out]# '\nfilename = "/my/directory/filename.txt"\ndir = os.path.dirname(filename)\n'
# Sun, 05 Feb 2017 20:35:36
# from joblib import Parallel, delayed
# Sun, 05 Feb 2017 20:35:36
# import multiprocessing
# Sun, 05 Feb 2017 20:35:37
# num_cores = multiprocessing.cpu_count()
# Sun, 05 Feb 2017 20:35:37
# Parallel(n_jobs=num_cores)(delayed(autoPTW2NP)(file_paths[iter], file_names[iter]) for iter in range(10))
# Sun, 05 Feb 2017 20:36:27
run dataExtractionAutomator.py
| gpl-3.0 |
davidam/python-examples | scikit/plot_adaboost_multiclass.py | 38 | 4126 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1]_ and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1]_ algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| gpl-3.0 |
giorgiop/scikit-learn | sklearn/linear_model/ransac.py | 16 | 17217 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
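# A quick worked example of the docstring formula above (an illustrative
# note, not part of the original module): with n_inliers=80, n_samples=100,
# min_samples=2 and probability=0.99 the inlier ratio is 0.8, so
#     log(1 - 0.99) / log(1 - 0.8 ** 2) = log(0.01) / log(0.36) ~= 4.51
# and _dynamic_max_trials(80, 100, 2, 0.99) returns ceil(4.51) == 5 trials.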
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20
Use ``loss`` instead.
loss : string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the ``i``th value of the array corresponding to the loss
on `X[i]`.
If the loss on a sample is greater than the ``residual_threshold``, then
this sample is classified as an outlier.
random_state : integer or numpy.RandomState, optional
        The generator used for random sample selection. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
sample_weight : array-like, shape = [n_samples]
Individual weights for each sample
raises error if sample_weight is passed and base_estimator
fit method does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' was deprecated in version 0.18 and "
"will be removed in version 0.20. Use 'loss' instead.",
DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
        try:  # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
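# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of robustly fitting a line in the presence of outliers,
# assuming scikit-learn and numpy are installed; the data and names below
# are invented purely for illustration.
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _X = _rng.uniform(-5, 5, size=(200, 1))
    _y = 3.0 * _X.ravel() + 1.0 + _rng.normal(scale=0.5, size=200)
    _y[:20] += 30.0  # corrupt a few samples to create gross outliers
    _ransac = RANSACRegressor(random_state=0)
    _ransac.fit(_X, _y)
    print("inliers found: %d" % _ransac.inlier_mask_.sum())
    print("estimated slope: %r" % _ransac.estimator_.coef_)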
| bsd-3-clause |
badlogicmanpreet/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/transforms.py | 69 | 75638 | """
matplotlib includes a framework for arbitrary geometric
transformations that is used determine the final position of all
elements drawn on the canvas.
Transforms are composed into trees of :class:`TransformNode` objects
whose actual value depends on their children. When the contents of
children change, their parents are automatically invalidated. The
next time an invalidated transform is accessed, it is recomputed to
reflect those changes. This invalidation/caching approach prevents
unnecessary recomputations of transforms, and contributes to better
interactive performance.
For example, here is a graph of the transform tree used to plot data
to the graph:
.. image:: ../_static/transforms.png
The framework can be used for both affine and non-affine
transformations. However, for speed, we want use the backend
renderers to perform affine transformations whenever possible.
Therefore, it is possible to perform just the affine or non-affine
part of a transformation on a set of data. The affine is always
assumed to occur after the non-affine. For any transform::
full transform == non-affine part + affine part
The backends are not expected to handle non-affine transformations
themselves.
"""
import numpy as np
from numpy import ma
from matplotlib._path import affine_transform
from numpy.linalg import inv
from weakref import WeakKeyDictionary
import warnings
try:
set
except NameError:
from sets import Set as set
import cbook
from path import Path
from _path import count_bboxes_overlapping_bbox, update_path_extents
DEBUG = False
if DEBUG:
import warnings
MaskedArray = ma.MaskedArray
class TransformNode(object):
"""
:class:`TransformNode` is the base class for anything that
participates in the transform tree and needs to invalidate its
parents or be invalidated. This includes classes that are not
really transforms, such as bounding boxes, since some transforms
depend on bounding boxes to compute their values.
"""
_gid = 0
# Invalidation may affect only the affine part. If the
# invalidation was "affine-only", the _invalid member is set to
    # INVALID_AFFINE
INVALID_NON_AFFINE = 1
INVALID_AFFINE = 2
INVALID = INVALID_NON_AFFINE | INVALID_AFFINE
# Some metadata about the transform, used to determine whether an
# invalidation is affine-only
is_affine = False
is_bbox = False
# If pass_through is True, all ancestors will always be
# invalidated, even if 'self' is already invalid.
pass_through = False
def __init__(self):
"""
Creates a new :class:`TransformNode`.
"""
# Parents are stored in a WeakKeyDictionary, so that if the
# parents are deleted, references from the children won't keep
# them alive.
self._parents = WeakKeyDictionary()
# TransformNodes start out as invalid until their values are
# computed for the first time.
self._invalid = 1
def __copy__(self, *args):
raise NotImplementedError(
"TransformNode instances can not be copied. " +
"Consider using frozen() instead.")
__deepcopy__ = __copy__
def invalidate(self):
"""
Invalidate this :class:`TransformNode` and all of its
ancestors. Should be called any time the transform changes.
"""
# If we are an affine transform being changed, we can set the
# flag to INVALID_AFFINE_ONLY
value = (self.is_affine) and self.INVALID_AFFINE or self.INVALID
# Shortcut: If self is already invalid, that means its parents
# are as well, so we don't need to do anything.
if self._invalid == value:
return
if not len(self._parents):
self._invalid = value
return
# Invalidate all ancestors of self using pseudo-recursion.
stack = [self]
while len(stack):
root = stack.pop()
# Stop at subtrees that have already been invalidated
if root._invalid != value or root.pass_through:
root._invalid = self.INVALID
stack.extend(root._parents.keys())
def set_children(self, *children):
"""
Set the children of the transform, to let the invalidation
system know which transforms can invalidate this transform.
Should be called from the constructor of any transforms that
depend on other transforms.
"""
for child in children:
child._parents[self] = None
if DEBUG:
_set_children = set_children
def set_children(self, *children):
self._set_children(*children)
self._children = children
set_children.__doc__ = _set_children.__doc__
def frozen(self):
"""
Returns a frozen copy of this transform node. The frozen copy
will not update when its children change. Useful for storing
a previously known state of a transform where
``copy.deepcopy()`` might normally be used.
"""
return self
if DEBUG:
def write_graphviz(self, fobj, highlight=[]):
"""
For debugging purposes.
Writes the transform tree rooted at 'self' to a graphviz "dot"
format file. This file can be run through the "dot" utility
to produce a graph of the transform tree.
Affine transforms are marked in blue. Bounding boxes are
marked in yellow.
*fobj*: A Python file-like object
"""
seen = set()
def recurse(root):
if root in seen:
return
seen.add(root)
props = {}
label = root.__class__.__name__
if root._invalid:
label = '[%s]' % label
if root in highlight:
props['style'] = 'bold'
props['shape'] = 'box'
props['label'] = '"%s"' % label
props = ' '.join(['%s=%s' % (key, val) for key, val in props.items()])
fobj.write('%s [%s];\n' %
(hash(root), props))
if hasattr(root, '_children'):
for child in root._children:
name = '?'
for key, val in root.__dict__.items():
if val is child:
name = key
break
fobj.write('%s -> %s [label="%s", fontsize=10];\n' % (
hash(root),
hash(child),
name))
recurse(child)
fobj.write("digraph G {\n")
recurse(self)
fobj.write("}\n")
else:
def write_graphviz(self, fobj, highlight=[]):
return
class BboxBase(TransformNode):
"""
This is the base class of all bounding boxes, and provides
read-only access to its data. A mutable bounding box is provided
by the :class:`Bbox` class.
The canonical representation is as two points, with no
restrictions on their ordering. Convenience properties are
provided to get the left, bottom, right and top edges and width
    and height, but these are not stored explicitly.
"""
is_bbox = True
is_affine = True
#* Redundant: Removed for performance
#
# def __init__(self):
# TransformNode.__init__(self)
if DEBUG:
def _check(points):
if ma.isMaskedArray(points):
warnings.warn("Bbox bounds are a masked array.")
points = np.asarray(points)
if (points[1,0] - points[0,0] == 0 or
points[1,1] - points[0,1] == 0):
warnings.warn("Singular Bbox.")
_check = staticmethod(_check)
def frozen(self):
return Bbox(self.get_points().copy())
frozen.__doc__ = TransformNode.__doc__
def __array__(self, *args, **kwargs):
return self.get_points()
def is_unit(self):
"""
Returns True if the :class:`Bbox` is the unit bounding box
from (0, 0) to (1, 1).
"""
return list(self.get_points().flatten()) == [0., 0., 1., 1.]
def _get_x0(self):
return self.get_points()[0, 0]
x0 = property(_get_x0, None, None, """
(property) :attr:`x0` is the first of the pair of *x* coordinates that
define the bounding box. :attr:`x0` is not guaranteed to be
less than :attr:`x1`. If you require that, use :attr:`xmin`.""")
def _get_y0(self):
return self.get_points()[0, 1]
y0 = property(_get_y0, None, None, """
(property) :attr:`y0` is the first of the pair of *y* coordinates that
define the bounding box. :attr:`y0` is not guaranteed to be
less than :attr:`y1`. If you require that, use :attr:`ymin`.""")
def _get_x1(self):
return self.get_points()[1, 0]
x1 = property(_get_x1, None, None, """
(property) :attr:`x1` is the second of the pair of *x* coordinates that
define the bounding box. :attr:`x1` is not guaranteed to be
greater than :attr:`x0`. If you require that, use :attr:`xmax`.""")
def _get_y1(self):
return self.get_points()[1, 1]
y1 = property(_get_y1, None, None, """
(property) :attr:`y1` is the second of the pair of *y* coordinates that
define the bounding box. :attr:`y1` is not guaranteed to be
greater than :attr:`y0`. If you require that, use :attr:`ymax`.""")
def _get_p0(self):
return self.get_points()[0]
p0 = property(_get_p0, None, None, """
(property) :attr:`p0` is the first pair of (*x*, *y*) coordinates that
define the bounding box. It is not guaranteed to be the bottom-left
corner. For that, use :attr:`min`.""")
def _get_p1(self):
return self.get_points()[1]
p1 = property(_get_p1, None, None, """
(property) :attr:`p1` is the second pair of (*x*, *y*) coordinates that
define the bounding box. It is not guaranteed to be the top-right
corner. For that, use :attr:`max`.""")
def _get_xmin(self):
return min(self.get_points()[:, 0])
xmin = property(_get_xmin, None, None, """
(property) :attr:`xmin` is the left edge of the bounding box.""")
def _get_ymin(self):
return min(self.get_points()[:, 1])
ymin = property(_get_ymin, None, None, """
(property) :attr:`ymin` is the bottom edge of the bounding box.""")
def _get_xmax(self):
return max(self.get_points()[:, 0])
xmax = property(_get_xmax, None, None, """
(property) :attr:`xmax` is the right edge of the bounding box.""")
def _get_ymax(self):
return max(self.get_points()[:, 1])
ymax = property(_get_ymax, None, None, """
(property) :attr:`ymax` is the top edge of the bounding box.""")
def _get_min(self):
return [min(self.get_points()[:, 0]),
min(self.get_points()[:, 1])]
min = property(_get_min, None, None, """
(property) :attr:`min` is the bottom-left corner of the bounding
box.""")
def _get_max(self):
return [max(self.get_points()[:, 0]),
max(self.get_points()[:, 1])]
max = property(_get_max, None, None, """
(property) :attr:`max` is the top-right corner of the bounding box.""")
def _get_intervalx(self):
return self.get_points()[:, 0]
intervalx = property(_get_intervalx, None, None, """
(property) :attr:`intervalx` is the pair of *x* coordinates that define
the bounding box. It is not guaranteed to be sorted from left to
right.""")
def _get_intervaly(self):
return self.get_points()[:, 1]
intervaly = property(_get_intervaly, None, None, """
(property) :attr:`intervaly` is the pair of *y* coordinates that define
the bounding box. It is not guaranteed to be sorted from bottom to
top.""")
def _get_width(self):
points = self.get_points()
return points[1, 0] - points[0, 0]
width = property(_get_width, None, None, """
(property) The width of the bounding box. It may be negative if
:attr:`x1` < :attr:`x0`.""")
def _get_height(self):
points = self.get_points()
return points[1, 1] - points[0, 1]
height = property(_get_height, None, None, """
(property) The height of the bounding box. It may be negative if
:attr:`y1` < :attr:`y0`.""")
def _get_size(self):
points = self.get_points()
return points[1] - points[0]
size = property(_get_size, None, None, """
(property) The width and height of the bounding box. May be negative,
in the same way as :attr:`width` and :attr:`height`.""")
def _get_bounds(self):
x0, y0, x1, y1 = self.get_points().flatten()
return (x0, y0, x1 - x0, y1 - y0)
bounds = property(_get_bounds, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
:attr:`height`).""")
def _get_extents(self):
return self.get_points().flatten().copy()
extents = property(_get_extents, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`, :attr:`y1`).""")
def get_points(self):
        raise NotImplementedError()
def containsx(self, x):
"""
Returns True if *x* is between or equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x >= x0 and x <= x1))
or (x >= x1 and x <= x0))
def containsy(self, y):
"""
Returns True if *y* is between or equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y >= y0 and y <= y1))
or (y >= y1 and y <= y0))
def contains(self, x, y):
"""
Returns *True* if (*x*, *y*) is a coordinate inside the
bounding box or on its edge.
"""
return self.containsx(x) and self.containsy(y)
def overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 < ax1) or
(by2 < ay1) or
(bx1 > ax2) or
(by1 > ay2))
def fully_containsx(self, x):
"""
Returns True if *x* is between but not equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x > x0 and x < x1))
or (x > x1 and x < x0))
def fully_containsy(self, y):
"""
Returns True if *y* is between but not equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
                 and (y > y0 and y < y1))
                or (y > y1 and y < y0))
def fully_contains(self, x, y):
"""
Returns True if (*x*, *y*) is a coordinate inside the bounding
box, but not on its edge.
"""
return self.fully_containsx(x) \
and self.fully_containsy(y)
def fully_overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*, but not on its edge alone.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 <= ax1) or
(by2 <= ay1) or
(bx1 >= ax2) or
(by1 >= ay2))
def transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the given transform.
"""
return Bbox(transform.transform(self.get_points()))
def inverse_transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the inverse of the given transform.
"""
return Bbox(transform.inverted().transform(self.get_points()))
coefs = {'C': (0.5, 0.5),
'SW': (0,0),
'S': (0.5, 0),
'SE': (1.0, 0),
'E': (1.0, 0.5),
'NE': (1.0, 1.0),
'N': (0.5, 1.0),
'NW': (0, 1.0),
'W': (0, 0.5)}
def anchored(self, c, container = None):
"""
Return a copy of the :class:`Bbox`, shifted to position *c*
within a container.
*c*: may be either:
* a sequence (*cx*, *cy*) where *cx* and *cy* range from 0
to 1, where 0 is left or bottom and 1 is right or top
* a string:
- 'C' for centered
- 'S' for bottom-center
- 'SE' for bottom-left
- 'E' for left
- etc.
Optional argument *container* is the box within which the
:class:`Bbox` is positioned; it defaults to the initial
:class:`Bbox`.
"""
if container is None:
container = self
l, b, w, h = container.bounds
if isinstance(c, str):
cx, cy = self.coefs[c]
else:
cx, cy = c
L, B, W, H = self.bounds
return Bbox(self._points +
[(l + cx * (w-W)) - L,
(b + cy * (h-H)) - B])
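    # Illustrative note (not in the original source): assuming the
    # definitions above,
    #     Bbox.from_bounds(0, 0, 1, 1).anchored('C', Bbox.from_bounds(0, 0, 4, 4))
    # returns a unit-sized box centered in the 4x4 container, i.e. a box
    # whose bounds are (1.5, 1.5, 1.0, 1.0).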
def shrunk(self, mx, my):
"""
Return a copy of the :class:`Bbox`, shrunk by the factor *mx*
in the *x* direction and the factor *my* in the *y* direction.
The lower left corner of the box remains unchanged. Normally
*mx* and *my* will be less than 1, but this is not enforced.
"""
w, h = self.size
return Bbox([self._points[0],
self._points[0] + [mx * w, my * h]])
def shrunk_to_aspect(self, box_aspect, container = None, fig_aspect = 1.0):
"""
Return a copy of the :class:`Bbox`, shrunk so that it is as
large as it can be while having the desired aspect ratio,
*box_aspect*. If the box coordinates are relative---that
is, fractions of a larger box such as a figure---then the
physical aspect ratio of that figure is specified with
*fig_aspect*, so that *box_aspect* can also be given as a
ratio of the absolute dimensions, not the relative dimensions.
"""
assert box_aspect > 0 and fig_aspect > 0
if container is None:
container = self
w, h = container.size
H = w * box_aspect/fig_aspect
if H <= h:
W = w
else:
W = h * fig_aspect/box_aspect
H = h
return Bbox([self._points[0],
self._points[0] + (W, H)])
def splitx(self, *args):
"""
e.g., ``bbox.splitx(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with vertical lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
xf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
w = x1 - x0
for xf0, xf1 in zip(xf[:-1], xf[1:]):
boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
return boxes
def splity(self, *args):
"""
        e.g., ``bbox.splity(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with horizontal lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
yf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
h = y1 - y0
for yf0, yf1 in zip(yf[:-1], yf[1:]):
boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
return boxes
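    # Illustrative note (not in the original source): splitting the unit box
    # at one third, Bbox.unit().splitx(1. / 3.), returns two boxes whose
    # bounds are (0, 0, 1/3, 1) and (1/3, 0, 2/3, 1) respectively; splity
    # behaves the same way along the *y* direction.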
def count_contains(self, vertices):
"""
Count the number of vertices contained in the :class:`Bbox`.
*vertices* is a Nx2 Numpy array.
"""
if len(vertices) == 0:
return 0
vertices = np.asarray(vertices)
x0, y0, x1, y1 = self._get_extents()
dx0 = np.sign(vertices[:, 0] - x0)
dy0 = np.sign(vertices[:, 1] - y0)
dx1 = np.sign(vertices[:, 0] - x1)
dy1 = np.sign(vertices[:, 1] - y1)
inside = (abs(dx0 + dx1) + abs(dy0 + dy1)) <= 2
return np.sum(inside)
def count_overlaps(self, bboxes):
"""
Count the number of bounding boxes that overlap this one.
bboxes is a sequence of :class:`BboxBase` objects
"""
return count_bboxes_overlapping_bbox(self, bboxes)
def expanded(self, sw, sh):
"""
Return a new :class:`Bbox` which is this :class:`Bbox`
expanded around its center by the given factors *sw* and
*sh*.
"""
width = self.width
height = self.height
deltaw = (sw * width - width) / 2.0
deltah = (sh * height - height) / 2.0
a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
return Bbox(self._points + a)
def padded(self, p):
"""
Return a new :class:`Bbox` that is padded on all four sides by
the given value.
"""
points = self._points
return Bbox(points + [[-p, -p], [p, p]])
def translated(self, tx, ty):
"""
Return a copy of the :class:`Bbox`, statically translated by
*tx* and *ty*.
"""
return Bbox(self._points + (tx, ty))
def corners(self):
"""
Return an array of points which are the four corners of this
rectangle. For example, if this :class:`Bbox` is defined by
the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
(*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
"""
l, b, r, t = self.get_points().flatten()
return np.array([[l, b], [l, t], [r, b], [r, t]])
def rotated(self, radians):
"""
Return a new bounding box that bounds a rotated version of
this bounding box by the given radians. The new bounding box
is still aligned with the axes, of course.
"""
corners = self.corners()
corners_rotated = Affine2D().rotate(radians).transform(corners)
bbox = Bbox.unit()
bbox.update_from_data_xy(corners_rotated, ignore=True)
return bbox
#@staticmethod
def union(bboxes):
"""
Return a :class:`Bbox` that contains all of the given bboxes.
"""
assert(len(bboxes))
if len(bboxes) == 1:
return bboxes[0]
x0 = np.inf
y0 = np.inf
x1 = -np.inf
y1 = -np.inf
for bbox in bboxes:
points = bbox.get_points()
xs = points[:, 0]
ys = points[:, 1]
x0 = min(x0, np.min(xs))
y0 = min(y0, np.min(ys))
x1 = max(x1, np.max(xs))
y1 = max(y1, np.max(ys))
return Bbox.from_extents(x0, y0, x1, y1)
union = staticmethod(union)
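    # Illustrative note (not in the original source): for example,
    #     Bbox.union([Bbox.from_extents(0, 0, 1, 1),
    #                 Bbox.from_extents(2, -1, 3, 0.5)])
    # yields a box equivalent to Bbox.from_extents(0, -1, 3, 1).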
class Bbox(BboxBase):
"""
A mutable bounding box.
"""
def __init__(self, points):
"""
*points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
If you need to create a :class:`Bbox` object from another form
of data, consider the static methods :meth:`unit`,
:meth:`from_bounds` and :meth:`from_extents`.
"""
BboxBase.__init__(self)
self._points = np.asarray(points, np.float_)
self._minpos = np.array([0.0000001, 0.0000001])
self._ignore = True
if DEBUG:
___init__ = __init__
def __init__(self, points):
self._check(points)
self.___init__(points)
def invalidate(self):
self._check(self._points)
TransformNode.invalidate(self)
_unit_values = np.array([[0.0, 0.0], [1.0, 1.0]], np.float_)
#@staticmethod
def unit():
"""
(staticmethod) Create a new unit :class:`Bbox` from (0, 0) to
(1, 1).
"""
return Bbox(Bbox._unit_values.copy())
unit = staticmethod(unit)
#@staticmethod
def from_bounds(x0, y0, width, height):
"""
(staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,
*width* and *height*.
*width* and *height* may be negative.
"""
return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
from_bounds = staticmethod(from_bounds)
#@staticmethod
def from_extents(*args):
"""
(staticmethod) Create a new Bbox from *left*, *bottom*,
*right* and *top*.
The *y*-axis increases upwards.
"""
points = np.array(args, dtype=np.float_).reshape(2, 2)
return Bbox(points)
from_extents = staticmethod(from_extents)
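    # Illustrative note (not in the original source): the constructors above
    # are related: Bbox.from_bounds(1., 2., 3., 4.) describes the same box
    # as Bbox.from_extents(1., 2., 4., 6.), and Bbox.unit() is equivalent to
    # Bbox.from_extents(0., 0., 1., 1.).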
def __repr__(self):
return 'Bbox(%s)' % repr(self._points)
__str__ = __repr__
def ignore(self, value):
"""
Set whether the existing bounds of the box should be ignored
by subsequent calls to :meth:`update_from_data` or
:meth:`update_from_data_xy`.
*value*:
- When True, subsequent calls to :meth:`update_from_data`
will ignore the existing bounds of the :class:`Bbox`.
- When False, subsequent calls to :meth:`update_from_data`
will include the existing bounds of the :class:`Bbox`.
"""
self._ignore = value
def update_from_data(self, x, y, ignore=None):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*x*: a numpy array of *x*-values
*y*: a numpy array of *y*-values
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
"""
warnings.warn(
"update_from_data requires a memory copy -- please replace with update_from_data_xy")
xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))
return self.update_from_data_xy(xy, ignore)
def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*path*: a :class:`~matplotlib.path.Path` instance
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if ignore is None:
ignore = self._ignore
if path.vertices.size == 0:
return
points, minpos, changed = update_path_extents(
path, None, self._points, self._minpos, ignore)
if changed:
self.invalidate()
if updatex:
self._points[:,0] = points[:,0]
self._minpos[0] = minpos[0]
if updatey:
self._points[:,1] = points[:,1]
self._minpos[1] = minpos[1]
def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*xy*: a numpy array of 2D points
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if len(xy) == 0:
return
path = Path(xy)
self.update_from_path(path, ignore=ignore,
updatex=updatex, updatey=updatey)
def _set_x0(self, val):
self._points[0, 0] = val
self.invalidate()
x0 = property(BboxBase._get_x0, _set_x0)
def _set_y0(self, val):
self._points[0, 1] = val
self.invalidate()
y0 = property(BboxBase._get_y0, _set_y0)
def _set_x1(self, val):
self._points[1, 0] = val
self.invalidate()
x1 = property(BboxBase._get_x1, _set_x1)
def _set_y1(self, val):
self._points[1, 1] = val
self.invalidate()
y1 = property(BboxBase._get_y1, _set_y1)
def _set_p0(self, val):
self._points[0] = val
self.invalidate()
p0 = property(BboxBase._get_p0, _set_p0)
def _set_p1(self, val):
self._points[1] = val
self.invalidate()
p1 = property(BboxBase._get_p1, _set_p1)
def _set_intervalx(self, interval):
self._points[:, 0] = interval
self.invalidate()
intervalx = property(BboxBase._get_intervalx, _set_intervalx)
def _set_intervaly(self, interval):
self._points[:, 1] = interval
self.invalidate()
intervaly = property(BboxBase._get_intervaly, _set_intervaly)
def _set_bounds(self, bounds):
l, b, w, h = bounds
points = np.array([[l, b], [l+w, b+h]], np.float_)
if np.any(self._points != points):
self._points = points
self.invalidate()
bounds = property(BboxBase._get_bounds, _set_bounds)
def _get_minpos(self):
return self._minpos
minpos = property(_get_minpos)
def _get_minposx(self):
return self._minpos[0]
minposx = property(_get_minposx)
def _get_minposy(self):
return self._minpos[1]
minposy = property(_get_minposy)
def get_points(self):
"""
Get the points of the bounding box directly as a numpy array
of the form: [[x0, y0], [x1, y1]].
"""
self._invalid = 0
return self._points
def set_points(self, points):
"""
Set the points of the bounding box directly from a numpy array
of the form: [[x0, y0], [x1, y1]]. No error checking is
performed, as this method is mainly for internal use.
"""
if np.any(self._points != points):
self._points = points
self.invalidate()
def set(self, other):
"""
Set this bounding box from the "frozen" bounds of another
:class:`Bbox`.
"""
if np.any(self._points != other.get_points()):
self._points = other.get_points()
self.invalidate()
class TransformedBbox(BboxBase):
"""
A :class:`Bbox` that is automatically transformed by a given
transform. When either the child bounding box or transform
changes, the bounds of this bbox will update accordingly.
"""
def __init__(self, bbox, transform):
"""
*bbox*: a child :class:`Bbox`
*transform*: a 2D :class:`Transform`
"""
assert bbox.is_bbox
assert isinstance(transform, Transform)
assert transform.input_dims == 2
assert transform.output_dims == 2
BboxBase.__init__(self)
self._bbox = bbox
self._transform = transform
self.set_children(bbox, transform)
self._points = None
def __repr__(self):
return "TransformedBbox(%s, %s)" % (self._bbox, self._transform)
__str__ = __repr__
def get_points(self):
if self._invalid:
points = self._transform.transform(self._bbox.get_points())
if ma.isMaskedArray(points):
points.putmask(0.0)
points = np.asarray(points)
self._points = points
self._invalid = 0
return self._points
get_points.__doc__ = Bbox.get_points.__doc__
if DEBUG:
_get_points = get_points
def get_points(self):
points = self._get_points()
self._check(points)
return points
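# A minimal usage sketch (kept as a comment; Affine2D is defined further down
# in this module):
#
#     tb = TransformedBbox(Bbox.unit(), Affine2D().scale(10.0))
#     tb.bounds   # -> (0.0, 0.0, 10.0, 10.0), recomputed lazily on access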
class Transform(TransformNode):
"""
The base class of all :class:`TransformNode` instances that
actually perform a transformation.
All non-affine transformations should be subclasses of this class.
New affine transformations should be subclasses of
:class:`Affine2D`.
Subclasses of this class should override the following members (at
minimum):
- :attr:`input_dims`
- :attr:`output_dims`
- :meth:`transform`
- :attr:`is_separable`
- :attr:`has_inverse`
- :meth:`inverted` (if :meth:`has_inverse` can return True)
If the transform needs to do something non-standard with
    :class:`matplotlib.path.Path` objects, such as adding curves
where there were once line segments, it should override:
- :meth:`transform_path`
"""
# The number of input and output dimensions for this transform.
# These must be overridden (with integers) in the subclass.
input_dims = None
output_dims = None
    # True if this transform has a corresponding inverse transform.
has_inverse = False
# True if this transform is separable in the x- and y- dimensions.
is_separable = False
#* Redundant: Removed for performance
#
# def __init__(self):
# TransformNode.__init__(self)
def __add__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(self, other)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __radd__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(other, self)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __array__(self, *args, **kwargs):
"""
Used by C/C++ -based backends to get at the array matrix data.
"""
return self.frozen().__array__()
def transform(self, values):
"""
Performs the transformation on the given array of values.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
raise NotImplementedError()
def transform_affine(self, values):
"""
Performs only the affine part of this transformation on the
given array of values.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally a no-op. In
affine transformations, this is equivalent to
``transform(values)``.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return values
def transform_non_affine(self, values):
"""
Performs only the non-affine part of the transformation.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally equivalent to
``transform(values)``. In affine transformations, this is
always a no-op.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return self.transform(values)
def get_affine(self):
"""
Get the affine part of this transform.
"""
return IdentityTransform()
def transform_point(self, point):
"""
A convenience function that returns the transformed copy of a
single point.
The point is given as a sequence of length :attr:`input_dims`.
The transformed point is returned as a sequence of length
:attr:`output_dims`.
"""
assert len(point) == self.input_dims
return self.transform(np.asarray([point]))[0]
def transform_path(self, path):
"""
Returns a transformed copy of path.
*path*: a :class:`~matplotlib.path.Path` instance.
In some cases, this transform may insert curves into the path
that began as line segments.
"""
return Path(self.transform(path.vertices), path.codes)
def transform_path_affine(self, path):
"""
Returns a copy of path, transformed only by the affine part of
this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
return path
def transform_path_non_affine(self, path):
"""
Returns a copy of path, transformed only by the non-affine
part of this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
return Path(self.transform_non_affine(path.vertices), path.codes)
def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
"""
Performs transformation on a set of angles anchored at
specific locations.
The *angles* must be a column vector (i.e., numpy array).
The *pts* must be a two-column numpy array of x,y positions
(angle transforms currently only work in 2D). This array must
have the same number of rows as *angles*.
*radians* indicates whether or not input angles are given in
radians (True) or degrees (False; the default).
*pushoff* is the distance to move away from *pts* for
determining transformed angles (see discussion of method
below).
The transformed angles are returned in an array with the same
size as *angles*.
The generic version of this method uses a very generic
algorithm that transforms *pts*, as well as locations very
close to *pts*, to find the angle in the transformed system.
"""
# Must be 2D
        if self.input_dims != 2 or self.output_dims != 2:
raise NotImplementedError('Only defined in 2D')
# pts must be array with 2 columns for x,y
assert pts.shape[1] == 2
# angles must be a column vector and have same number of
# rows as pts
assert np.prod(angles.shape) == angles.shape[0] == pts.shape[0]
# Convert to radians if desired
if not radians:
angles = angles / 180.0 * np.pi
# Move a short distance away
pts2 = pts + pushoff * np.c_[ np.cos(angles), np.sin(angles) ]
# Transform both sets of points
tpts = self.transform( pts )
tpts2 = self.transform( pts2 )
# Calculate transformed angles
d = tpts2 - tpts
a = np.arctan2( d[:,1], d[:,0] )
# Convert back to degrees if desired
if not radians:
a = a * 180.0 / np.pi
return a
def inverted(self):
"""
Return the corresponding inverse transformation.
The return value of this method should be treated as
temporary. An update to *self* does not cause a corresponding
update to its inverted copy.
``x === self.inverted().transform(self.transform(x))``
"""
raise NotImplementedError()
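    # A minimal sketch of the round-trip contract above (kept as a comment;
    # it uses the concrete Affine2D subclass defined later in this module):
    #
    #     t = Affine2D().scale(2.0).translate(1.0, 0.0)
    #     t.inverted().transform_point(t.transform_point((3.0, 4.0)))
    #     # -> (3.0, 4.0), up to floating-point round-off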
class TransformWrapper(Transform):
"""
A helper class that holds a single child transform and acts
equivalently to it.
This is useful if a node of the transform tree must be replaced at
run time with a transform of a different type. This class allows
that replacement to correctly trigger invalidation.
Note that :class:`TransformWrapper` instances must have the same
input and output dimensions during their entire lifetime, so the
child transform may only be replaced with another child transform
of the same dimensions.
"""
pass_through = True
is_affine = False
def __init__(self, child):
"""
*child*: A class:`Transform` instance. This child may later
be replaced with :meth:`set`.
"""
assert isinstance(child, Transform)
Transform.__init__(self)
self.input_dims = child.input_dims
self.output_dims = child.output_dims
self._set(child)
self._invalid = 0
def __repr__(self):
return "TransformWrapper(%r)" % self._child
__str__ = __repr__
def frozen(self):
return self._child.frozen()
frozen.__doc__ = Transform.frozen.__doc__
def _set(self, child):
self._child = child
self.set_children(child)
self.transform = child.transform
self.transform_affine = child.transform_affine
self.transform_non_affine = child.transform_non_affine
self.transform_path = child.transform_path
self.transform_path_affine = child.transform_path_affine
self.transform_path_non_affine = child.transform_path_non_affine
self.get_affine = child.get_affine
self.inverted = child.inverted
def set(self, child):
"""
Replace the current child of this transform with another one.
The new child must have the same number of input and output
dimensions as the current child.
"""
assert child.input_dims == self.input_dims
assert child.output_dims == self.output_dims
self._set(child)
self._invalid = 0
self.invalidate()
self._invalid = 0
def _get_is_separable(self):
return self._child.is_separable
is_separable = property(_get_is_separable)
def _get_has_inverse(self):
return self._child.has_inverse
has_inverse = property(_get_has_inverse)
class AffineBase(Transform):
"""
The base class of all affine transformations of any number of
dimensions.
"""
is_affine = True
def __init__(self):
Transform.__init__(self)
self._inverted = None
def __array__(self, *args, **kwargs):
return self.get_matrix()
#@staticmethod
def _concat(a, b):
"""
Concatenates two transformation matrices (represented as numpy
arrays) together.
"""
return np.dot(b, a)
_concat = staticmethod(_concat)
def get_matrix(self):
"""
Get the underlying transformation matrix as a numpy array.
"""
raise NotImplementedError()
def transform_non_affine(self, points):
return points
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_affine(self, path):
return self.transform_path(path)
transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
def transform_path_non_affine(self, path):
return path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Transform.get_affine.__doc__
class Affine2DBase(AffineBase):
"""
The base class of all 2D affine transformations.
2D affine transformations are performed using a 3x3 numpy array::
a c e
b d f
0 0 1
This class provides the read-only interface. For a mutable 2D
affine transformation, use :class:`Affine2D`.
Subclasses of this class will generally only need to override a
constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
"""
input_dims = 2
output_dims = 2
#* Redundant: Removed for performance
#
# def __init__(self):
# Affine2DBase.__init__(self)
def frozen(self):
return Affine2D(self.get_matrix().copy())
frozen.__doc__ = AffineBase.frozen.__doc__
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
def __array__(self, *args, **kwargs):
return self.get_matrix()
def to_values(self):
"""
Return the values of the matrix as a sequence (a,b,c,d,e,f)
"""
mtx = self.get_matrix()
return tuple(mtx[:2].swapaxes(0, 1).flatten())
#@staticmethod
def matrix_from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new transformation matrix as a 3x3
numpy array of the form::
a c e
b d f
0 0 1
"""
return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)
matrix_from_values = staticmethod(matrix_from_values)
def transform(self, points):
mtx = self.get_matrix()
if isinstance(points, MaskedArray):
tpoints = affine_transform(points.data, mtx)
return ma.MaskedArray(tpoints, mask=ma.getmask(points))
return affine_transform(points, mtx)
def transform_point(self, point):
mtx = self.get_matrix()
return affine_transform(point, mtx)
transform_point.__doc__ = AffineBase.transform_point.__doc__
if DEBUG:
_transform = transform
def transform(self, points):
# The major speed trap here is just converting to the
# points to an array in the first place. If we can use
# more arrays upstream, that should help here.
if (not ma.isMaskedArray(points) and
not isinstance(points, np.ndarray)):
warnings.warn(
('A non-numpy array of type %s was passed in for ' +
'transformation. Please correct this.')
                    % type(points))
return self._transform(points)
transform.__doc__ = AffineBase.transform.__doc__
transform_affine = transform
transform_affine.__doc__ = AffineBase.transform_affine.__doc__
def inverted(self):
if self._inverted is None or self._invalid:
mtx = self.get_matrix()
self._inverted = Affine2D(inv(mtx))
self._invalid = 0
return self._inverted
inverted.__doc__ = AffineBase.inverted.__doc__
class Affine2D(Affine2DBase):
"""
A mutable 2D affine transformation.
"""
def __init__(self, matrix = None):
"""
Initialize an Affine transform from a 3x3 numpy float array::
a c e
b d f
0 0 1
If *matrix* is None, initialize with the identity transform.
"""
Affine2DBase.__init__(self)
if matrix is None:
matrix = np.identity(3)
elif DEBUG:
matrix = np.asarray(matrix, np.float_)
assert matrix.shape == (3, 3)
self._mtx = matrix
self._invalid = 0
def __repr__(self):
return "Affine2D(%s)" % repr(self._mtx)
__str__ = __repr__
def __cmp__(self, other):
if (isinstance(other, Affine2D) and
(self.get_matrix() == other.get_matrix()).all()):
return 0
return -1
#@staticmethod
def from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new Affine2D instance from the given
values::
a c e
b d f
0 0 1
"""
return Affine2D(
np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)
.reshape((3,3)))
from_values = staticmethod(from_values)
def get_matrix(self):
"""
Get the underlying transformation matrix as a 3x3 numpy array::
a c e
b d f
0 0 1
"""
self._invalid = 0
return self._mtx
def set_matrix(self, mtx):
"""
Set the underlying transformation matrix from a 3x3 numpy array::
a c e
b d f
0 0 1
"""
self._mtx = mtx
self.invalidate()
def set(self, other):
"""
Set this transformation from the frozen copy of another
:class:`Affine2DBase` object.
"""
assert isinstance(other, Affine2DBase)
self._mtx = other.get_matrix()
self.invalidate()
#@staticmethod
def identity():
"""
(staticmethod) Return a new :class:`Affine2D` object that is
the identity transform.
Unless this transform will be mutated later on, consider using
the faster :class:`IdentityTransform` class instead.
"""
return Affine2D(np.identity(3))
identity = staticmethod(identity)
def clear(self):
"""
Reset the underlying matrix to the identity transform.
"""
self._mtx = np.identity(3)
self.invalidate()
return self
def rotate(self, theta):
"""
Add a rotation (in radians) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
a = np.cos(theta)
b = np.sin(theta)
rotate_mtx = np.array(
[[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(rotate_mtx, self._mtx)
self.invalidate()
return self
def rotate_deg(self, degrees):
"""
Add a rotation (in degrees) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.rotate(degrees*np.pi/180.)
def rotate_around(self, x, y, theta):
"""
Add a rotation (in radians) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate(theta).translate(x, y)
def rotate_deg_around(self, x, y, degrees):
"""
Add a rotation (in degrees) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
def translate(self, tx, ty):
"""
Adds a translation in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
translate_mtx = np.array(
[[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(translate_mtx, self._mtx)
self.invalidate()
return self
def scale(self, sx, sy=None):
"""
Adds a scale in place.
If *sy* is None, the same scale is applied in both the *x*- and
*y*-directions.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
if sy is None:
sy = sx
scale_mtx = np.array(
[[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(scale_mtx, self._mtx)
self.invalidate()
return self
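    # A minimal chaining sketch (kept as a comment), since each mutator above
    # returns *self*:
    #
    #     t = Affine2D().rotate_deg(90).translate(1.0, 0.0).scale(2.0)
    #     t.transform_point((1.0, 0.0))
    #     # -> roughly (2.0, 2.0): the rotation maps (1, 0) to (0, 1), the
    #     #    translation to (1, 1) and the scale to (2, 2)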
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
class IdentityTransform(Affine2DBase):
"""
    A special class that does one thing, the identity transform, in a
fast way.
"""
_mtx = np.identity(3)
def frozen(self):
return self
frozen.__doc__ = Affine2DBase.frozen.__doc__
def __repr__(self):
return "IdentityTransform()"
__str__ = __repr__
def get_matrix(self):
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def transform(self, points):
return points
transform.__doc__ = Affine2DBase.transform.__doc__
transform_affine = transform
transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__
def transform_path(self, path):
return path
transform_path.__doc__ = Affine2DBase.transform_path.__doc__
transform_path_affine = transform_path
transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Affine2DBase.get_affine.__doc__
inverted = get_affine
inverted.__doc__ = Affine2DBase.inverted.__doc__
class BlendedGenericTransform(Transform):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This "generic" version can handle any given child transform in the
*x*- and *y*-directions.
"""
input_dims = 2
output_dims = 2
is_separable = True
pass_through = True
def __init__(self, x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
# Here we ask: "Does it blend?"
Transform.__init__(self)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
self._affine = None
def _get_is_affine(self):
return self._x.is_affine and self._y.is_affine
is_affine = property(_get_is_affine)
def frozen(self):
return blended_transform_factory(self._x.frozen(), self._y.frozen())
frozen.__doc__ = Transform.frozen.__doc__
def __repr__(self):
return "BlendedGenericTransform(%s,%s)" % (self._x, self._y)
__str__ = __repr__
def transform(self, points):
x = self._x
y = self._y
if x is y and x.input_dims == 2:
return x.transform(points)
if x.input_dims == 2:
x_points = x.transform(points)[:, 0:1]
else:
x_points = x.transform(points[:, 0])
x_points = x_points.reshape((len(x_points), 1))
if y.input_dims == 2:
y_points = y.transform(points)[:, 1:]
else:
y_points = y.transform(points[:, 1])
y_points = y_points.reshape((len(y_points), 1))
if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
return ma.concatenate((x_points, y_points), 1)
else:
return np.concatenate((x_points, y_points), 1)
transform.__doc__ = Transform.transform.__doc__
def transform_affine(self, points):
return self.get_affine().transform(points)
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
if self._x.is_affine and self._y.is_affine:
return points
return self.transform(points)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
inverted.__doc__ = Transform.inverted.__doc__
def get_affine(self):
if self._invalid or self._affine is None:
if self._x.is_affine and self._y.is_affine:
if self._x == self._y:
self._affine = self._x.get_affine()
else:
x_mtx = self._x.get_affine().get_matrix()
y_mtx = self._y.get_affine().get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._affine = Affine2D(mtx)
else:
self._affine = IdentityTransform()
self._invalid = 0
return self._affine
get_affine.__doc__ = Transform.get_affine.__doc__
class BlendedAffine2D(Affine2DBase):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This version is an optimization for the case where both child
transforms are of type :class:`Affine2DBase`.
"""
is_separable = True
def __init__(self, x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
Both *x_transform* and *y_transform* must be 2D affine
transforms.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
assert x_transform.is_affine
assert y_transform.is_affine
assert x_transform.is_separable
assert y_transform.is_separable
Transform.__init__(self)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
Affine2DBase.__init__(self)
self._mtx = None
def __repr__(self):
return "BlendedAffine2D(%s,%s)" % (self._x, self._y)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
if self._x == self._y:
self._mtx = self._x.get_matrix()
else:
x_mtx = self._x.get_matrix()
y_mtx = self._y.get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def blended_transform_factory(x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to transform
the *x*-axis and *y_transform* to transform the *y*-axis.
A faster version of the blended transform is returned for the case
where both child transforms are affine.
"""
if (isinstance(x_transform, Affine2DBase)
and isinstance(y_transform, Affine2DBase)):
return BlendedAffine2D(x_transform, y_transform)
return BlendedGenericTransform(x_transform, y_transform)
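# A minimal usage sketch for blended transforms (kept as a comment):
#
#     blend = blended_transform_factory(Affine2D().scale(2.0, 1.0),
#                                       Affine2D().translate(0.0, 5.0))
#     blend.transform_point((1.0, 1.0))
#     # -> (2.0, 6.0): x is scaled by 2, y is shifted by 5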
class CompositeGenericTransform(Transform):
"""
A composite transform formed by applying transform *a* then
transform *b*.
This "generic" version can handle any two arbitrary
transformations.
"""
pass_through = True
def __init__(self, a, b):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
Transform.__init__(self)
self._a = a
self._b = b
self.set_children(a, b)
def frozen(self):
self._invalid = 0
frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
if not isinstance(frozen, CompositeGenericTransform):
return frozen.frozen()
return frozen
frozen.__doc__ = Transform.frozen.__doc__
def _get_is_affine(self):
return self._a.is_affine and self._b.is_affine
is_affine = property(_get_is_affine)
def _get_is_separable(self):
return self._a.is_separable and self._b.is_separable
is_separable = property(_get_is_separable)
def __repr__(self):
return "CompositeGenericTransform(%s, %s)" % (self._a, self._b)
__str__ = __repr__
def transform(self, points):
return self._b.transform(
self._a.transform(points))
transform.__doc__ = Transform.transform.__doc__
def transform_affine(self, points):
return self.get_affine().transform(points)
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
if self._a.is_affine and self._b.is_affine:
return points
return self._b.transform_non_affine(
self._a.transform(points))
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
return self._b.transform_path(
self._a.transform_path(path))
transform_path.__doc__ = Transform.transform_path.__doc__
def transform_path_affine(self, path):
return self._b.transform_path_affine(
self._a.transform_path(path))
transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
def transform_path_non_affine(self, path):
if self._a.is_affine and self._b.is_affine:
return path
return self._b.transform_path_non_affine(
self._a.transform_path(path))
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
if self._a.is_affine and self._b.is_affine:
return Affine2D(np.dot(self._b.get_affine().get_matrix(),
self._a.get_affine().get_matrix()))
else:
return self._b.get_affine()
get_affine.__doc__ = Transform.get_affine.__doc__
def inverted(self):
return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
inverted.__doc__ = Transform.inverted.__doc__
class CompositeAffine2D(Affine2DBase):
"""
A composite transform formed by applying transform *a* then transform *b*.
This version is an optimization that handles the case where both *a*
and *b* are 2D affines.
"""
def __init__(self, a, b):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
Both *a* and *b* must be instances of :class:`Affine2DBase`.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
assert a.is_affine
assert b.is_affine
Affine2DBase.__init__(self)
self._a = a
self._b = b
self.set_children(a, b)
self._mtx = None
def __repr__(self):
return "CompositeAffine2D(%s, %s)" % (self._a, self._b)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
self._mtx = np.dot(
self._b.get_matrix(),
self._a.get_matrix())
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def composite_transform_factory(a, b):
"""
Create a new composite transform that is the result of applying
transform a then transform b.
    Shortcut versions of the composite transform are provided for the
case where both child transforms are affine, or one or the other
is the identity transform.
Composite transforms may also be created using the '+' operator,
e.g.::
c = a + b
"""
if isinstance(a, IdentityTransform):
return b
elif isinstance(b, IdentityTransform):
return a
elif isinstance(a, AffineBase) and isinstance(b, AffineBase):
return CompositeAffine2D(a, b)
return CompositeGenericTransform(a, b)
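# A minimal usage sketch (kept as a comment); this factory is what the '+'
# operator on Transform delegates to:
#
#     t = Affine2D().scale(2.0) + Affine2D().translate(1.0, 0.0)
#     t.transform_point((1.0, 1.0))
#     # -> (3.0, 2.0): the scale is applied first, then the translation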
class BboxTransform(Affine2DBase):
"""
:class:`BboxTransform` linearly transforms points from one
:class:`Bbox` to another :class:`Bbox`.
"""
is_separable = True
def __init__(self, boxin, boxout):
"""
Create a new :class:`BboxTransform` that linearly transforms
points from *boxin* to *boxout*.
"""
assert boxin.is_bbox
assert boxout.is_bbox
Affine2DBase.__init__(self)
self._boxin = boxin
self._boxout = boxout
self.set_children(boxin, boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransform(%s, %s)" % (self._boxin, self._boxout)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
outl, outb, outw, outh = self._boxout.bounds
x_scale = outw / inw
y_scale = outh / inh
if DEBUG and (x_scale == 0 or y_scale == 0):
raise ValueError("Transforming from or to a singular bounding box.")
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)],
[0.0 , y_scale, (-inb*y_scale+outb)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
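# A minimal usage sketch (kept as a comment): mapping a data box onto a
# display rectangle.
#
#     data_box = Bbox.from_extents(0.0, 0.0, 10.0, 10.0)
#     view_box = Bbox.from_extents(0.0, 0.0, 400.0, 300.0)
#     BboxTransform(data_box, view_box).transform_point((5.0, 10.0))
#     # -> (200.0, 300.0)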
class BboxTransformTo(Affine2DBase):
"""
:class:`BboxTransformTo` is a transformation that linearly
transforms points from the unit bounding box to a given
:class:`Bbox`.
"""
is_separable = True
def __init__(self, boxout):
"""
Create a new :class:`BboxTransformTo` that linearly transforms
points from the unit bounding box to *boxout*.
"""
assert boxout.is_bbox
Affine2DBase.__init__(self)
self._boxout = boxout
self.set_children(boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformTo(%s)" % (self._boxout)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
outl, outb, outw, outh = self._boxout.bounds
if DEBUG and (outw == 0 or outh == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[outw, 0.0, outl],
[ 0.0, outh, outb],
[ 0.0, 0.0, 1.0]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformFrom(Affine2DBase):
"""
:class:`BboxTransformFrom` linearly transforms points from a given
:class:`Bbox` to the unit bounding box.
"""
is_separable = True
def __init__(self, boxin):
assert boxin.is_bbox
Affine2DBase.__init__(self)
self._boxin = boxin
self.set_children(boxin)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformFrom(%s)" % (self._boxin)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
if DEBUG and (inw == 0 or inh == 0):
raise ValueError("Transforming from a singular bounding box.")
x_scale = 1.0 / inw
y_scale = 1.0 / inh
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)],
[0.0 , y_scale, (-inb*y_scale)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class ScaledTranslation(Affine2DBase):
"""
A transformation that translates by *xt* and *yt*, after *xt* and *yt*
    have been transformed by the given transform *scale_trans*.
"""
def __init__(self, xt, yt, scale_trans):
Affine2DBase.__init__(self)
self._t = (xt, yt)
self._scale_trans = scale_trans
self.set_children(scale_trans)
self._mtx = None
self._inverted = None
def __repr__(self):
return "ScaledTranslation(%s)" % (self._t,)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
xt, yt = self._scale_trans.transform_point(self._t)
self._mtx = np.array([[1.0, 0.0, xt],
[0.0, 1.0, yt],
[0.0, 0.0, 1.0]],
np.float_)
self._invalid = 0
self._inverted = None
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class TransformedPath(TransformNode):
"""
A :class:`TransformedPath` caches a non-affine transformed copy of
the :class:`~matplotlib.path.Path`. This cached copy is
automatically updated when the non-affine part of the transform
changes.
"""
def __init__(self, path, transform):
"""
Create a new :class:`TransformedPath` from the given
:class:`~matplotlib.path.Path` and :class:`Transform`.
"""
assert isinstance(transform, Transform)
TransformNode.__init__(self)
self._path = path
self._transform = transform
self.set_children(transform)
self._transformed_path = None
self._transformed_points = None
def _revalidate(self):
if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
or self._transformed_path is None):
self._transformed_path = \
self._transform.transform_path_non_affine(self._path)
self._transformed_points = \
Path(self._transform.transform_non_affine(self._path.vertices))
self._invalid = 0
def get_transformed_points_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation. Unlike
:meth:`get_transformed_path_and_affine`, no interpolation will
be performed.
"""
self._revalidate()
return self._transformed_points, self.get_affine()
def get_transformed_path_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation.
"""
self._revalidate()
return self._transformed_path, self.get_affine()
def get_fully_transformed_path(self):
"""
Return a fully-transformed copy of the child path.
"""
if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
or self._transformed_path is None):
self._transformed_path = \
self._transform.transform_path_non_affine(self._path)
self._invalid = 0
return self._transform.transform_path_affine(self._transformed_path)
def get_affine(self):
return self._transform.get_affine()
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
'''
Ensure the endpoints of a range are finite and not too close together.
"too close" means the interval is smaller than 'tiny' times
the maximum absolute value.
If they are too close, each will be moved by the 'expander'.
If 'increasing' is True and vmin > vmax, they will be swapped,
regardless of whether they are too close.
    If either is inf or -inf or nan, return -expander, expander.
'''
if (not np.isfinite(vmin)) or (not np.isfinite(vmax)):
return -expander, expander
swapped = False
if vmax < vmin:
vmin, vmax = vmax, vmin
swapped = True
if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny:
if vmin == 0.0:
vmin = -expander
vmax = expander
else:
vmin -= expander*abs(vmin)
vmax += expander*abs(vmax)
if swapped and not increasing:
vmin, vmax = vmax, vmin
return vmin, vmax
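# A minimal sketch of nonsingular() (kept as a comment):
#
#     nonsingular(1.0, 1.0)    # -> (0.999, 1.001) with the default expander
#     nonsingular(3.0, -2.0)   # -> (-2.0, 3.0); swapped since increasing=True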
def interval_contains(interval, val):
a, b = interval
return (
((a < b) and (a <= val and b >= val))
or (b <= val and a >= val))
def interval_contains_open(interval, val):
a, b = interval
return (
((a < b) and (a < val and b > val))
or (b < val and a > val))
def offset_copy(trans, fig, x=0.0, y=0.0, units='inches'):
'''
Return a new transform with an added offset.
args:
trans is any transform
kwargs:
fig is the current figure; it can be None if units are 'dots'
x, y give the offset
units is 'inches', 'points' or 'dots'
'''
if units == 'dots':
return trans + Affine2D().translate(x, y)
if fig is None:
raise ValueError('For units of inches or points a fig kwarg is needed')
if units == 'points':
x /= 72.0
y /= 72.0
elif not units == 'inches':
raise ValueError('units must be dots, points, or inches')
return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
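# A typical call pattern (kept as a comment; ``fig``, ``ax``, ``x`` and ``y``
# stand for an existing figure, axes and data point and are not defined here):
#
#     trans = offset_copy(ax.transData, fig=fig, x=0.05, y=0.10,
#                         units='inches')
#     ax.text(x, y, 'label', transform=trans)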
| agpl-3.0 |
Akshay0724/scikit-learn | examples/neighbors/plot_kde_1d.py | 60 | 5120 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
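#----------------------------------------------------------------------
# A hedged extension of this example (kept as a comment): instead of the
# fixed bandwidth of 0.5 used above, the bandwidth can be chosen by
# cross-validating the total log-likelihood, assuming a scikit-learn version
# that provides sklearn.model_selection.GridSearchCV:
#
#     from sklearn.model_selection import GridSearchCV
#     grid = GridSearchCV(KernelDensity(kernel='gaussian'),
#                         {'bandwidth': np.linspace(0.1, 1.0, 30)}, cv=5)
#     grid.fit(X)
#     print(grid.best_params_['bandwidth'])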
| bsd-3-clause |
b09dan/universities_sentiment | text_class/demo/demo_4_news.py | 1 | 3390 | import itertools
import json
import numpy
import gensim
from gensim.models import Word2Vec
from pynlc.test_data import news_classes, word2vec
from pynlc.text_classifier import TextClassifier
from pynlc.text_processor import TextProcessor
from sklearn.metrics import mean_squared_error
def classification_demo(data_path, train_before, test_before, train_epochs,
                        test_labels_path, instantiated_test_labels_path,
                        trained_path):
with open(data_path, 'r', encoding='utf-8') as data_source:
data = json.load(data_source)
texts = [item["text"] for item in data]
class_names = [item["classes"] for item in data]
train_texts = texts[:train_before]
train_classes = class_names[:train_before]
test_texts = texts[train_before:test_before]
test_classes = class_names[train_before:test_before]
text_processor = TextProcessor("english", [["turn", "on"], ["turn", "off"]], gensim.models.KeyedVectors.load_word2vec_format(word2vec))
classifier = TextClassifier(text_processor)
classifier.train(train_texts, train_classes, train_epochs, True)
prediction = classifier.predict(test_texts)
with open(test_labels_path, "w", encoding="utf-8") as test_labels_output:
test_labels_output_lst = []
for i in range(0, len(prediction)):
test_labels_output_lst.append({
"real": test_classes[i],
"classified": prediction[i]
})
json.dump(test_labels_output_lst, test_labels_output)
instantiated_classifier = TextClassifier(text_processor, **classifier.config)
instantiated_prediction = instantiated_classifier.predict(test_texts)
with open(instantiated_test_labels_path, "w", encoding="utf-8") as instantiated_test_labels_output:
instantiated_test_labels_output_lst = []
for i in range(0, len(instantiated_prediction)):
instantiated_test_labels_output_lst.append({
"real": test_classes[i],
"classified": instantiated_prediction[i]
})
json.dump(instantiated_test_labels_output_lst, instantiated_test_labels_output)
with open(trained_path, "w", encoding="utf-8") as trained_output:
json.dump(classifier.config, trained_output, ensure_ascii=True)
def classification_error(files):
for name in files:
with open(name, "r", encoding="utf-8") as src:
data = json.load(src)
classes = []
real = []
for row in data:
classes.append(row["real"])
classified = row["classified"]
row_classes = list(classified.keys())
row_classes.sort()
real.append([classified[class_name] for class_name in row_classes])
labels = []
class_names = list(set(itertools.chain(*classes)))
class_names.sort()
for item_classes in classes:
labels.append([int(class_name in item_classes) for class_name in class_names])
real_np = numpy.array(real)
mse = mean_squared_error(numpy.array(labels), real_np)
print(name, mse)
if __name__ == '__main__':
print("News:\n")
classification_demo(news_classes, 500, 100, 50,
"news_test_labels.json", "instantiated_news_test_labels.json",
"news_trained.json")
classification_error(["news_test_labels.json", "instantiated_news_test_labels.json"])
| mit |
kcavagnolo/astroML | book_figures/appendix/fig_plotting_examples.py | 3 | 2381 | """
Examples of Plotting with Matplotlib
------------------------------------
Figures A.2, A.3, A.4, A.5
These scripts generate the output of the plotting examples in the appendix.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
np.random.seed(0)
#------------------------------------------------------------
# First Example: simple plot
plt.figure(1, figsize=(5, 3.75))
x = np.linspace(0, 2 * np.pi, 1000)
y = np.sin(x)
plt.plot(x, y)
plt.xlim(0, 2 * np.pi)
plt.ylim(-1.3, 1.3)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Simple Sinusoid Plot')
#------------------------------------------------------------
# Second Example: error-bars over simple plot
plt.figure(2, figsize=(5, 3.75))
x = np.linspace(0, 2 * np.pi, 1000)
y = np.sin(x)
plt.plot(x, y)
plt.xlim(0, 2 * np.pi)
plt.ylim(-1.3, 1.3)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Simple Sinusoid Plot')
x_obs = 2 * np.pi * np.random.random(50)
y_obs = np.sin(x_obs)
y_obs += np.random.normal(0, 0.1, 50)
plt.errorbar(x_obs, y_obs, 0.1, fmt='.', color='black')
#------------------------------------------------------------
# Third Example: histogram
plt.figure(3, figsize=(5, 3.75))
x = np.random.normal(size=1000)
plt.hist(x, bins=50)
plt.xlabel('x')
plt.ylabel('N(x)')
#------------------------------------------------------------
# Fourth Example: spline fitting
from scipy import interpolate
x = np.linspace(0, 16, 30)
y = np.sin(x)
x2 = np.linspace(0, 16, 1000)
spl = interpolate.UnivariateSpline(x, y, s=0)
plt.figure(4, figsize=(5, 3.75))
plt.plot(x, y, 'ok')
plt.plot(x2, spl(x2), '-k')
plt.ylim(-1.3, 1.3)
plt.show()
| bsd-2-clause |
nesterione/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
rayNymous/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/font_manager.py | 69 | 42655 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig
<http://www.fontconfig.org>`_ on Unix variant plaforms (Linux, OS X,
Solaris). To enable it, set the constant ``USE_FONTCONFIG`` in this
file to ``True``. Fontconfig has the advantage that it is the
standard way to look up fonts on X11 platforms, so if a font is
installed, it is much more likely to be found.
"""
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <[email protected]>
Paul Barrett <[email protected]>
Michael Droettboom <[email protected]>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
import os, sys, glob
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import afm
from matplotlib import ft2font
from matplotlib import rcParams, get_configdir
from matplotlib.cbook import is_string_like
from matplotlib.fontconfig_pattern import \
parse_fontconfig_pattern, generate_fontconfig_pattern
try:
import cPickle as pickle
except ImportError:
import pickle
USE_FONTCONFIG = False
verbose = matplotlib.verbose
font_scalings = {
'xx-small' : 0.579,
'x-small' : 0.694,
'small' : 0.833,
'medium' : 1.0,
'large' : 1.200,
'x-large' : 1.440,
'xx-large' : 1.728,
'larger' : 1.2,
'smaller' : 0.833,
None : 1.0}
stretch_dict = {
'ultra-condensed' : 100,
'extra-condensed' : 200,
'condensed' : 300,
'semi-condensed' : 400,
'normal' : 500,
'semi-expanded' : 600,
'expanded' : 700,
'extra-expanded' : 800,
'ultra-expanded' : 900}
weight_dict = {
'ultralight' : 100,
'light' : 200,
'normal' : 400,
'regular' : 400,
'book' : 400,
'medium' : 500,
'roman' : 500,
'semibold' : 600,
'demibold' : 600,
'demi' : 600,
'bold' : 700,
'heavy' : 800,
'extra bold' : 800,
'black' : 900}
font_family_aliases = set([
'serif',
'sans-serif',
'cursive',
'fantasy',
'monospace',
'sans'])
# OS Font paths
MSFolders = \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories = [
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF/",
# here is the new standard location for fonts
"/usr/share/fonts/",
# documented as a good place to install new fonts
"/usr/local/share/fonts/",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
"/Library/Fonts/",
"/Network/Library/Fonts/",
"/System/Library/Fonts/"
]
if not USE_FONTCONFIG:
home = os.environ.get('HOME')
if home is not None:
# user fonts on OSX
path = os.path.join(home, 'Library', 'Fonts')
OSXFontDirectories.append(path)
path = os.path.join(home, '.fonts')
X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
"""
    Return a list of file extensions that are synonyms for
    the given file extension *fontext*.
"""
return {'ttf': ('ttf', 'otf'),
'otf': ('ttf', 'otf'),
'afm': ('afm',)}[fontext]
def win32FontDirectory():
"""
Return the user-specified font directory for Win32. This is
looked up from the registry key::
\\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
If the key is not found, $WINDIR/Fonts will be returned.
"""
try:
import _winreg
except ImportError:
pass # Fall through to default
else:
try:
user = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, MSFolders)
try:
try:
return _winreg.QueryValueEx(user, 'Fonts')[0]
except OSError:
pass # Fall through to default
finally:
_winreg.CloseKey(user)
except OSError:
pass # Fall through to default
return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
"""
Search for fonts in the specified font directory, or use the
system directories if none given. A list of TrueType font
    filenames is returned by default, or AFM fonts if *fontext* ==
'afm'.
"""
import _winreg
if directory is None:
directory = win32FontDirectory()
fontext = get_fontext_synonyms(fontext)
key, items = None, {}
for fontdir in MSFontDirectories:
try:
local = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, fontdir)
except OSError:
continue
if not local:
files = []
for ext in fontext:
files.extend(glob.glob(os.path.join(directory, '*.'+ext)))
return files
try:
for j in range(_winreg.QueryInfoKey(local)[1]):
try:
key, direc, any = _winreg.EnumValue( local, j)
if not os.path.dirname(direc):
direc = os.path.join(directory, direc)
direc = os.path.abspath(direc).lower()
if os.path.splitext(direc)[1][1:] in fontext:
items[direc] = 1
except EnvironmentError:
continue
except WindowsError:
continue
return items.keys()
finally:
_winreg.CloseKey(local)
return None
def OSXFontDirectory():
"""
Return the system font directories for OS X. This is done by
starting at the list of hardcoded paths in
:attr:`OSXFontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in OSXFontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def OSXInstalledFonts(directory=None, fontext='ttf'):
"""
Get list of font files on OS X - ignores font suffix by default.
"""
if directory is None:
directory = OSXFontDirectory()
fontext = get_fontext_synonyms(fontext)
files = []
for path in directory:
if fontext is None:
files.extend(glob.glob(os.path.join(path,'*')))
else:
for ext in fontext:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
return files
def x11FontDirectory():
"""
Return the system font directories for X11. This is done by
starting at the list of hardcoded paths in
:attr:`X11FontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in X11FontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def get_fontconfig_fonts(fontext='ttf'):
"""
Grab a list of all the fonts that are being tracked by fontconfig
by making a system call to ``fc-list``. This is an easy way to
grab all of the fonts the user wants to be made available to
    applications, without needing to know where all of them reside.
"""
try:
import commands
except ImportError:
return {}
fontext = get_fontext_synonyms(fontext)
fontfiles = {}
status, output = commands.getstatusoutput("fc-list file")
if status == 0:
for line in output.split('\n'):
fname = line.split(':')[0]
if (os.path.splitext(fname)[1][1:] in fontext and
os.path.exists(fname)):
fontfiles[fname] = 1
return fontfiles
def findSystemFonts(fontpaths=None, fontext='ttf'):
"""
Search for fonts in the specified font paths. If no paths are
given, will use a standard set of system paths, as well as the
list of fonts tracked by fontconfig if fontconfig is installed and
    available. A list of TrueType fonts is returned by default, with
AFM fonts as an option.
"""
fontfiles = {}
fontexts = get_fontext_synonyms(fontext)
if fontpaths is None:
if sys.platform == 'win32':
fontdir = win32FontDirectory()
fontpaths = [fontdir]
# now get all installed fonts directly...
for f in win32InstalledFonts(fontdir):
base, ext = os.path.splitext(f)
if len(ext)>1 and ext[1:].lower() in fontexts:
fontfiles[f] = 1
else:
fontpaths = x11FontDirectory()
# check for OS X & load its fonts if present
if sys.platform == 'darwin':
for f in OSXInstalledFonts(fontext=fontext):
fontfiles[f] = 1
for f in get_fontconfig_fonts(fontext):
fontfiles[f] = 1
elif isinstance(fontpaths, (str, unicode)):
fontpaths = [fontpaths]
for path in fontpaths:
files = []
for ext in fontexts:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
for fname in files:
fontfiles[os.path.abspath(fname)] = 1
return [fname for fname in fontfiles.keys() if os.path.exists(fname)]
def weight_as_number(weight):
"""
Return the weight property as a numeric value. String values
are converted to their corresponding numeric value.
"""
if isinstance(weight, str):
try:
weight = weight_dict[weight.lower()]
except KeyError:
weight = 400
elif weight in range(100, 1000, 100):
pass
else:
        raise ValueError('weight is not a valid integer')
return weight
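# Illustrative sketch: string weights resolve through weight_dict, unknown
# strings fall back to 400, and multiples of 100 in [100, 900] pass through.
#   >>> weight_as_number('bold')
#   700
#   >>> weight_as_number(500)
#   500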
class FontEntry(object):
"""
A class for storing Font properties. It is used when populating
the font lookup dictionary.
"""
def __init__(self,
fname ='',
name ='',
style ='normal',
variant='normal',
weight ='normal',
stretch='normal',
size ='medium',
):
self.fname = fname
self.name = name
self.style = style
self.variant = variant
self.weight = weight
self.stretch = stretch
try:
self.size = str(float(size))
except ValueError:
self.size = size
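# Illustrative sketch (the font path and name are hypothetical): non-numeric
# sizes such as 'scalable' are stored as-is by the constructor.
#   >>> entry = FontEntry(fname='/usr/share/fonts/Example.ttf',
#   ...                   name='Example Sans', weight=400, size='scalable')
#   >>> entry.size
#   'scalable'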
def ttfFontProperty(font):
"""
    A function for populating a :class:`FontEntry` by extracting
information from the TrueType font file.
*font* is a :class:`FT2Font` instance.
"""
name = font.family_name
# Styles are: italic, oblique, and normal (default)
sfnt = font.get_sfnt()
sfnt2 = sfnt.get((1,0,0,2))
sfnt4 = sfnt.get((1,0,0,4))
if sfnt2:
sfnt2 = sfnt2.lower()
else:
sfnt2 = ''
if sfnt4:
sfnt4 = sfnt4.lower()
else:
sfnt4 = ''
if sfnt4.find('oblique') >= 0:
style = 'oblique'
elif sfnt4.find('italic') >= 0:
style = 'italic'
elif sfnt2.find('regular') >= 0:
style = 'normal'
elif font.style_flags & ft2font.ITALIC:
style = 'italic'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = None
for w in weight_dict.keys():
if sfnt4.find(w) >= 0:
weight = w
break
if not weight:
if font.style_flags & ft2font.BOLD:
weight = 700
else:
weight = 400
weight = weight_as_number(weight)
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
if sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \
sfnt4.find('cond') >= 0:
stretch = 'condensed'
elif sfnt4.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# !!!! Incomplete
if font.scalable:
size = 'scalable'
else:
size = str(float(font.get_fontsize()))
# !!!! Incomplete
size_adjust = None
return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
"""
    A function for populating a :class:`FontEntry` instance by
extracting information from the AFM font file.
*font* is a class:`AFM` instance.
"""
name = font.get_familyname()
# Styles are: italic, oblique, and normal (default)
if font.get_angle() != 0 or name.lower().find('italic') >= 0:
style = 'italic'
elif name.lower().find('oblique') >= 0:
style = 'oblique'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = weight_as_number(font.get_weight().lower())
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# All AFM fonts are apparently scalable.
size = 'scalable'
# !!!! Incomplete
size_adjust = None
return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
"""
A function to create a font lookup list. The default is to create
a list of TrueType fonts. An AFM font list can optionally be
created.
"""
fontlist = []
# Add fonts from list of known font files.
seen = {}
for fpath in fontfiles:
verbose.report('createFontDict: %s' % (fpath), 'debug')
fname = os.path.split(fpath)[1]
if fname in seen: continue
else: seen[fname] = 1
if fontext == 'afm':
try:
fh = open(fpath, 'r')
except:
verbose.report("Could not open font file %s" % fpath)
continue
try:
try:
font = afm.AFM(fh)
finally:
fh.close()
except RuntimeError:
verbose.report("Could not parse font file %s"%fpath)
continue
prop = afmFontProperty(fpath, font)
else:
try:
font = ft2font.FT2Font(str(fpath))
except RuntimeError:
verbose.report("Could not open font file %s"%fpath)
continue
except UnicodeError:
verbose.report("Cannot handle unicode filenames")
#print >> sys.stderr, 'Bad file is', fpath
continue
try: prop = ttfFontProperty(font)
except: continue
fontlist.append(prop)
return fontlist
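# Illustrative sketch: building the lookup lists from the fonts discovered on
# the system; each element of the result is a FontEntry.
#   >>> ttf_entries = createFontList(findSystemFonts(fontext='ttf'))
#   >>> afm_entries = createFontList(findSystemFonts(fontext='afm'), fontext='afm')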
class FontProperties(object):
"""
A class for storing and manipulating font properties.
The font properties are those described in the `W3C Cascading
Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
specification. The six properties are:
- family: A list of font names in decreasing order of priority.
The items may include a generic font family name, either
'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
In that case, the actual font to be used will be looked up
from the associated rcParam in :file:`matplotlibrc`.
- style: Either 'normal', 'italic' or 'oblique'.
- variant: Either 'normal' or 'small-caps'.
- stretch: A numeric value in the range 0-1000 or one of
'ultra-condensed', 'extra-condensed', 'condensed',
'semi-condensed', 'normal', 'semi-expanded', 'expanded',
'extra-expanded' or 'ultra-expanded'
- weight: A numeric value in the range 0-1000 or one of
'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'
      - size: Either a relative value of 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large' or an
absolute font size, e.g. 12
The default font property for TrueType fonts (as specified in the
default :file:`matplotlibrc` file) is::
sans-serif, normal, normal, normal, normal, scalable.
Alternatively, a font may be specified using an absolute path to a
.ttf file, by using the *fname* kwarg.
The preferred usage of font sizes is to use the relative values,
e.g. 'large', instead of absolute font sizes, e.g. 12. This
approach allows all text sizes to be made larger or smaller based
on the font manager's default font size, i.e. by using the
:meth:`FontManager.set_default_size` method.
This class will also accept a `fontconfig
<http://www.fontconfig.org/>`_ pattern, if it is the only argument
provided. See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_. This support
does not require fontconfig to be installed. We are merely
borrowing its pattern syntax for use here.
Note that matplotlib's internal font manager and fontconfig use a
different algorithm to lookup fonts, so the results of the same pattern
may be different in matplotlib than in other applications that use
fontconfig.
"""
def __init__(self,
family = None,
style = None,
variant= None,
weight = None,
stretch= None,
size = None,
fname = None, # if this is set, it's a hardcoded filename to use
_init = None # used only by copy()
):
self._family = None
self._slant = None
self._variant = None
self._weight = None
self._stretch = None
self._size = None
self._file = None
# This is used only by copy()
if _init is not None:
self.__dict__.update(_init.__dict__)
return
if is_string_like(family):
# Treat family as a fontconfig pattern if it is the only
# parameter provided.
if (style is None and
variant is None and
weight is None and
stretch is None and
size is None and
fname is None):
self.set_fontconfig_pattern(family)
return
self.set_family(family)
self.set_style(style)
self.set_variant(variant)
self.set_weight(weight)
self.set_stretch(stretch)
self.set_file(fname)
self.set_size(size)
def _parse_fontconfig_pattern(self, pattern):
return parse_fontconfig_pattern(pattern)
def __hash__(self):
l = self.__dict__.items()
l.sort()
return hash(repr(l))
def __str__(self):
return self.get_fontconfig_pattern()
def get_family(self):
"""
Return a list of font names that comprise the font family.
"""
if self._family is None:
family = rcParams['font.family']
if is_string_like(family):
return [family]
return family
return self._family
def get_name(self):
"""
Return the name of the font that best matches the font
properties.
"""
return ft2font.FT2Font(str(findfont(self))).family_name
def get_style(self):
"""
Return the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if self._slant is None:
return rcParams['font.style']
return self._slant
get_slant = get_style
def get_variant(self):
"""
Return the font variant. Values are: 'normal' or
'small-caps'.
"""
if self._variant is None:
return rcParams['font.variant']
return self._variant
def get_weight(self):
"""
        Return the font weight. Options are: A numeric value in the
range 0-1000 or one of 'light', 'normal', 'regular', 'book',
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
'heavy', 'extra bold', 'black'
"""
if self._weight is None:
return rcParams['font.weight']
return self._weight
def get_stretch(self):
"""
Return the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
"""
if self._stretch is None:
return rcParams['font.stretch']
return self._stretch
def get_size(self):
"""
Return the font size.
"""
if self._size is None:
return rcParams['font.size']
return self._size
def get_size_in_points(self):
if self._size is not None:
try:
return float(self._size)
except ValueError:
pass
default_size = fontManager.get_default_size()
return default_size * font_scalings.get(self._size)
def get_file(self):
"""
Return the filename of the associated font.
"""
return self._file
def get_fontconfig_pattern(self):
"""
Get a fontconfig pattern suitable for looking up the font as
specified with fontconfig's ``fc-match`` utility.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
return generate_fontconfig_pattern(self)
def set_family(self, family):
"""
Change the font family. May be either an alias (generic name
        in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
'fantasy', or 'monospace', or a real font name.
"""
if family is None:
self._family = None
else:
if is_string_like(family):
family = [family]
self._family = family
set_name = set_family
def set_style(self, style):
"""
Set the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if style not in ('normal', 'italic', 'oblique', None):
raise ValueError("style must be normal, italic or oblique")
self._slant = style
set_slant = set_style
def set_variant(self, variant):
"""
Set the font variant. Values are: 'normal' or 'small-caps'.
"""
if variant not in ('normal', 'small-caps', None):
raise ValueError("variant must be normal or small-caps")
self._variant = variant
def set_weight(self, weight):
"""
Set the font weight. May be either a numeric value in the
range 0-1000 or one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'
"""
if weight is not None:
try:
weight = int(weight)
if weight < 0 or weight > 1000:
raise ValueError()
except ValueError:
if weight not in weight_dict:
raise ValueError("weight is invalid")
self._weight = weight
def set_stretch(self, stretch):
"""
Set the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded' or
'ultra-expanded', or a numeric value in the range 0-1000.
"""
if stretch is not None:
try:
stretch = int(stretch)
if stretch < 0 or stretch > 1000:
raise ValueError()
except ValueError:
if stretch not in stretch_dict:
raise ValueError("stretch is invalid")
self._stretch = stretch
def set_size(self, size):
"""
        Set the font size. Either a relative value of 'xx-small',
'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
or an absolute font size, e.g. 12.
"""
if size is not None:
try:
size = float(size)
except ValueError:
if size is not None and size not in font_scalings:
raise ValueError("size is invalid")
self._size = size
def set_file(self, file):
"""
Set the filename of the fontfile to use. In this case, all
other properties will be ignored.
"""
self._file = file
def set_fontconfig_pattern(self, pattern):
"""
Set the properties by parsing a fontconfig *pattern*.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
for key, val in self._parse_fontconfig_pattern(pattern).items():
if type(val) == list:
getattr(self, "set_" + key)(val[0])
else:
getattr(self, "set_" + key)(val)
def copy(self):
"""Return a deep copy of self"""
return FontProperties(_init = self)
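# Illustrative sketch: properties set explicitly are returned as given, while
# unset ones fall back to the corresponding rcParams entries.
#   >>> prop = FontProperties(family='serif', weight='bold', size=14)
#   >>> prop.get_weight()
#   'bold'
#   >>> prop.get_size_in_points()
#   14.0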
def ttfdict_to_fnames(d):
"""
flatten a ttfdict to all the filenames it contains
"""
fnames = []
for named in d.values():
for styled in named.values():
for variantd in styled.values():
for weightd in variantd.values():
for stretchd in weightd.values():
for fname in stretchd.values():
fnames.append(fname)
return fnames
def pickle_dump(data, filename):
"""
Equivalent to pickle.dump(data, open(filename, 'w'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'w')
try:
pickle.dump(data, fh)
finally:
fh.close()
def pickle_load(filename):
"""
Equivalent to pickle.load(open(filename, 'r'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'r')
try:
data = pickle.load(fh)
finally:
fh.close()
return data
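# Illustrative sketch (the cache path is hypothetical): the two helpers form a
# simple round-trip that always closes the underlying file handle.
#   >>> pickle_dump({'answer': 42}, '/tmp/example.cache')
#   >>> pickle_load('/tmp/example.cache')
#   {'answer': 42}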
class FontManager:
"""
On import, the :class:`FontManager` singleton instance creates a
list of TrueType fonts based on the font properties: name, style,
variant, weight, stretch, and size. The :meth:`findfont` method
does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, a
default font is returned.
"""
def __init__(self, size=None, weight='normal'):
self.__default_weight = weight
self.default_size = size
paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
os.path.join(rcParams['datapath'], 'fonts', 'afm')]
# Create list of font paths
for pathname in ['TTFPATH', 'AFMPATH']:
if pathname in os.environ:
ttfpath = os.environ[pathname]
if ttfpath.find(';') >= 0: #win32 style
paths.extend(ttfpath.split(';'))
elif ttfpath.find(':') >= 0: # unix style
paths.extend(ttfpath.split(':'))
else:
paths.append(ttfpath)
verbose.report('font search path %s'%(str(paths)))
# Load TrueType fonts and create font dictionary.
self.ttffiles = findSystemFonts(paths) + findSystemFonts()
for fname in self.ttffiles:
verbose.report('trying fontname %s' % fname, 'debug')
if fname.lower().find('vera.ttf')>=0:
self.defaultFont = fname
break
else:
# use anything
self.defaultFont = self.ttffiles[0]
self.ttflist = createFontList(self.ttffiles)
if rcParams['pdf.use14corefonts']:
# Load only the 14 PDF core fonts. These fonts do not need to be
# embedded; every PDF viewing application is required to have them:
# Helvetica, Helvetica-Bold, Helvetica-Oblique, Helvetica-BoldOblique,
# Courier, Courier-Bold, Courier-Oblique, Courier-BoldOblique,
# Times-Roman, Times-Bold, Times-Italic, Times-BoldItalic, Symbol,
# ZapfDingbats.
afmpath = os.path.join(rcParams['datapath'],'fonts','pdfcorefonts')
afmfiles = findSystemFonts(afmpath, fontext='afm')
self.afmlist = createFontList(afmfiles, fontext='afm')
else:
self.afmfiles = findSystemFonts(paths, fontext='afm') + \
findSystemFonts(fontext='afm')
self.afmlist = createFontList(self.afmfiles, fontext='afm')
self.ttf_lookup_cache = {}
self.afm_lookup_cache = {}
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
def get_default_size(self):
"""
Return the default font size.
"""
if self.default_size is None:
return rcParams['font.size']
return self.default_size
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
def set_default_size(self, size):
"""
Set the default font size in points. The initial value is set
by ``font.size`` in rc.
"""
self.default_size = size
def update_fonts(self, filenames):
"""
Update the font dictionary with new font files.
Currently not implemented.
"""
# !!!! Needs implementing
raise NotImplementedError
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Returns a match score between the list of font families in
*families* and the font family name *family2*.
An exact match anywhere in the list returns 0.0.
A match by generic font name will return 0.1.
No match will return 1.0.
"""
for i, family1 in enumerate(families):
if family1.lower() in font_family_aliases:
if family1 == 'sans':
                    family1 = 'sans-serif'
options = rcParams['font.' + family1]
if family2 in options:
idx = options.index(family2)
return 0.1 * (float(idx) / len(options))
elif family1.lower() == family2.lower():
return 0.0
return 1.0
def score_style(self, style1, style2):
"""
Returns a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif style1 in ('italic', 'oblique') and \
style2 in ('italic', 'oblique'):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Returns a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Returns a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Returns a match score between *weight1* and *weight2*.
The result is the absolute value of the difference between the
CSS numeric values of *weight1* and *weight2*, normalized
between 0.0 and 1.0.
"""
try:
weightval1 = int(weight1)
except ValueError:
weightval1 = weight_dict.get(weight1, 500)
try:
weightval2 = int(weight2)
except ValueError:
weightval2 = weight_dict.get(weight2, 500)
return abs(weightval1 - weightval2) / 1000.0
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
        # Size value should have already been checked for validity.
try:
sizeval1 = float(size1)
except ValueError:
            sizeval1 = self.default_size * font_scalings.get(size1, 1.0)  # font_scalings is a dict, not a callable
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
def findfont(self, prop, fontext='ttf'):
"""
Search the font list for the font that most closely matches
the :class:`FontProperties` *prop*.
:meth:`findfont` performs a nearest neighbor search. Each
font is given a similarity score to the target font
properties. The first font with the highest score is
returned. If no matches below a certain threshold are found,
the default font (usually Vera Sans) is returned.
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
"""
debug = False
if prop is None:
return self.defaultFont
if is_string_like(prop):
prop = FontProperties(prop)
fname = prop.get_file()
if fname is not None:
verbose.report('findfont returning %s'%fname, 'debug')
return fname
if fontext == 'afm':
font_cache = self.afm_lookup_cache
fontlist = self.afmlist
else:
font_cache = self.ttf_lookup_cache
fontlist = self.ttflist
cached = font_cache.get(hash(prop))
if cached:
return cached
best_score = 1e64
best_font = None
for font in fontlist:
# Matching family should have highest priority, so it is multiplied
# by 10.0
score = \
self.score_family(prop.get_family(), font.name) * 10.0 + \
self.score_style(prop.get_style(), font.style) + \
self.score_variant(prop.get_variant(), font.variant) + \
self.score_weight(prop.get_weight(), font.weight) + \
self.score_stretch(prop.get_stretch(), font.stretch) + \
self.score_size(prop.get_size(), font.size)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is None or best_score >= 10.0:
verbose.report('findfont: Could not match %s. Returning %s' %
(prop, self.defaultFont))
result = self.defaultFont
else:
verbose.report('findfont: Matching %s to %s (%s) with score of %f' %
(prop, best_font.name, best_font.fname, best_score))
result = best_font.fname
font_cache[hash(prop)] = result
return result
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
"""
Returns True if the given font is a Postscript Compact Font Format
Font embedded in an OpenType wrapper. Used by the PostScript and
PDF backends that can not subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
result = _is_opentype_cff_font_cache.get(filename)
if result is None:
fd = open(filename, 'rb')
tag = fd.read(4)
fd.close()
result = (tag == 'OTTO')
_is_opentype_cff_font_cache[filename] = result
return result
return False
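# Illustrative sketch (file names are hypothetical): only .otf files whose
# first four bytes are the 'OTTO' tag are reported as OpenType/CFF; any other
# extension returns False without touching the disk.
#   >>> is_opentype_cff_font('/fonts/SomeCFFFont.otf')   # doctest: +SKIP
#   True
#   >>> is_opentype_cff_font('/fonts/Vera.ttf')
#   False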
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
import re
def fc_match(pattern, fontext):
import commands
fontexts = get_fontext_synonyms(fontext)
ext = "." + fontext
status, output = commands.getstatusoutput('fc-match -sv "%s"' % pattern)
if status == 0:
for match in _fc_match_regex.finditer(output):
file = match.group(1)
if os.path.splitext(file)[1][1:] in fontexts:
return file
return None
_fc_match_regex = re.compile(r'\sfile:\s+"([^"]*)"')
_fc_match_cache = {}
def findfont(prop, fontext='ttf'):
if not is_string_like(prop):
prop = prop.get_fontconfig_pattern()
cached = _fc_match_cache.get(prop)
if cached is not None:
return cached
result = fc_match(prop, fontext)
if result is None:
result = fc_match(':', fontext)
_fc_match_cache[prop] = result
return result
else:
_fmcache = os.path.join(get_configdir(), 'fontList.cache')
fontManager = None
def _rebuild():
global fontManager
fontManager = FontManager()
pickle_dump(fontManager, _fmcache)
verbose.report("generated new fontManager")
try:
fontManager = pickle_load(_fmcache)
fontManager.default_size = None
verbose.report("Using fontManager instance from %s" % _fmcache)
except:
_rebuild()
def findfont(prop, **kw):
global fontManager
font = fontManager.findfont(prop, **kw)
if not os.path.exists(font):
verbose.report("%s returned by pickled fontManager does not exist" % font)
_rebuild()
font = fontManager.findfont(prop, **kw)
return font
| agpl-3.0 |
LUTAN/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/externals/joblib/parallel.py | 10 | 28398 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
import os
import sys
import gc
import warnings
from collections import Sized
from math import sqrt
import functools
import time
import threading
import itertools
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
###############################################################################
# CPU that works also when multiprocessing is not installed (python2.5)
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
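# Illustrative sketch of the filter's behaviour: the first job is always
# reported, verbose == 0 silences everything, and verbosity above 10 reports
# every iteration.
#   >>> _verbosity_filter(0, 5)    # first job is always reported
#   False
#   >>> _verbosity_filter(3, 0)    # verbosity disabled -> filtered out
#   True
#   >>> _verbosity_filter(3, 11)   # very verbose -> never filtered
#   False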
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
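# Illustrative sketch: delayed only captures the call, it does not execute it;
# the captured triple can be invoked later by a worker.
#   >>> from math import sqrt
#   >>> func, args, kwargs = delayed(sqrt)(4)
#   >>> func(*args, **kwargs)
#   2.0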
###############################################################################
class ImmediateApply(object):
""" A non-delayed apply function.
"""
def __init__(self, func, args, kwargs):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = func(*args, **kwargs)
def get(self):
return self.results
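# Illustrative sketch: with n_jobs=1 the "job" is just an eager call whose
# result is handed back through get(), mirroring the async pool API.
#   >>> job = ImmediateApply(len, ([1, 2, 3],), {})
#   >>> job.get()
#   3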
###############################################################################
class CallBack(object):
""" Callback used by parallel: it is used for progress reporting, and
to add data to be processed
"""
def __init__(self, index, parallel):
self.parallel = parallel
self.index = index
def __call__(self, out):
self.parallel.print_progress(self.index)
if self.parallel._original_iterable:
self.parallel.dispatch_next()
class LockedIterator(object):
"""Wrapper to protect a thread-unsafe iterable against concurrent access.
A Python generator is not thread-safe by default and will raise
ValueError("generator already executing") if two threads consume it
concurrently.
In joblib this could typically happen when the passed iterator is a
generator expression and pre_dispatch != 'all'. In that case a callback is
passed to the multiprocessing apply_async call and helper threads will
trigger the consumption of the source iterable in the dispatch_next
method.
"""
def __init__(self, it):
self._lock = threading.Lock()
self._it = iter(it)
def __iter__(self):
return self
def next(self):
with self._lock:
return next(self._it)
# For Python 3 compat
__next__ = next
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int
The number of jobs to use for the computation. If -1 all CPUs
are used. If 1 is given, no parallel computing code is used
at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
          output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The amount of jobs to be pre-dispatched. Default is 'all',
but it may be memory consuming, for instance if each job
        involves a lot of data.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 100e6 (100MB) by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
    arguments. The main features it brings in addition to
    using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend=None, verbose=0, pre_dispatch='all',
temp_folder=None, max_nbytes=100e6, mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self._pool = None
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it.
self._output = None
self._jobs = list()
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
def dispatch(self, func, args, kwargs):
""" Queue the function for computing, with or without multiprocessing
"""
if self._pool is None:
job = ImmediateApply(func, args, kwargs)
index = len(self._jobs)
if not _verbosity_filter(index, self.verbose):
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(time.time() - self._start_time)
))
self._jobs.append(job)
self.n_dispatched += 1
else:
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
try:
self._lock.acquire()
job = self._pool.apply_async(SafeFunction(func), args,
kwargs, callback=CallBack(self.n_dispatched, self))
self._jobs.append(job)
self.n_dispatched += 1
except AssertionError:
print('[Parallel] Pool seems closed')
finally:
self._lock.release()
def dispatch_next(self):
""" Dispatch more data for parallel processing
"""
self._dispatch_amount += 1
while self._dispatch_amount:
try:
# XXX: possible race condition shuffling the order of
# dispatches in the next two lines.
func, args, kwargs = next(self._original_iterable)
self.dispatch(func, args, kwargs)
self._dispatch_amount -= 1
except ValueError:
""" Race condition in accessing a generator, we skip,
the dispatch will be done later.
"""
except StopIteration:
self._iterating = False
self._original_iterable = None
return
def _print(self, msg, msg_args):
""" Display the message on stout or stderr depending on verbosity
"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self, index):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print only 'verbose' times a messages
# The challenge is that we may not know the queue length
if self._original_iterable:
if _verbosity_filter(index, self.verbose):
return
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(elapsed_time),
))
else:
# We are finished dispatching
queue_length = self.n_dispatched
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (queue_length - index + 1
- self._pre_dispatch_amount)
frequency = (queue_length // self.verbose) + 1
is_last_item = (index + 1 == queue_length)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
queue_length,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job queue can be filling up as
# we empty it
if hasattr(self, '_lock'):
self._lock.acquire()
job = self._jobs.pop(0)
if hasattr(self, '_lock'):
self._lock.release()
try:
self._output.append(job.get())
except tuple(self.exceptions) as exception:
try:
self._aborting = True
self._lock.acquire()
if isinstance(exception,
(KeyboardInterrupt, WorkerInterrupt)):
# We have captured a user interruption, clean up
# everything
if hasattr(self, '_pool'):
self._pool.close()
self._pool.terminate()
# We can now allow subprocesses again
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
raise exception
elif isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (
this_report,
exception.message,
)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
raise exception_type(report)
raise exception
finally:
self._lock.release()
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
if n_jobs < 0 and mp is not None:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
self._lock = threading.Lock()
# Whether or not to set an environment flag to track
# multiple process spawning
set_environ_flag = False
if (n_jobs is None or mp is None or n_jobs == 1):
n_jobs = 1
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=2)
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=2)
else:
already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect you main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Make sure to free as much memory as possible before forking
gc.collect()
# Set an environment variable to avoid infinite loops
set_environ_flag = True
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
pre_dispatch = self.pre_dispatch
if isinstance(iterable, Sized):
# We are given a sized (an object with len). No need to be lazy.
pre_dispatch = 'all'
if pre_dispatch == 'all' or n_jobs == 1:
self._original_iterable = None
self._pre_dispatch_amount = 0
else:
# The dispatch mechanism relies on multiprocessing helper threads
# to dispatch tasks from the original iterable concurrently upon
# job completions. As Python generators are not thread-safe we
# need to wrap it with a lock
iterable = LockedIterator(iterable)
self._original_iterable = iterable
self._dispatch_amount = 0
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions
iterable = itertools.islice(iterable, pre_dispatch)
self._start_time = time.time()
self.n_dispatched = 0
try:
if set_environ_flag:
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
self._iterating = True
for function, args, kwargs in iterable:
self.dispatch(function, args, kwargs)
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above for loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output),
len(self._output),
short_format_time(elapsed_time)
))
finally:
if n_jobs > 1:
self._pool.close()
self._pool.terminate() # terminate does a join()
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| apache-2.0 |
Sentient07/scikit-learn | sklearn/preprocessing/label.py | 28 | 28237 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
y = column_or_1d(y, warn=True)
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : array of shape [n_samples,] or [n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def fit_transform(self, y):
"""Fit label binarizer and transform multi-class labels to binary
labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : array or sparse matrix of shape [n_samples,] or \
[n_samples, n_classes]
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
        y : numpy array or CSR matrix of shape [n_samples]
            Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
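# Illustrative sketch (not part of the public module): a hedged demo of how
# LabelBinarizer.inverse_transform applies a threshold to score-like input
# for a binary problem. Only numpy and the class above are assumed.
def _demo_label_binarizer_threshold():
    import numpy as np
    lb = LabelBinarizer(neg_label=0, pos_label=1)
    lb.fit([0, 1, 1, 0])                       # binary problem, classes_ == [0, 1]
    scores = np.array([[-0.3], [0.2], [1.7]])  # decision_function-style scores
    # With threshold=0, strictly positive scores map to the positive class.
    labels = lb.inverse_transform(scores, threshold=0)
    assert list(labels) == [0, 1, 1]
    return labels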
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
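# Illustrative sketch (not part of the public module): a hedged demo of the
# neg_label / pos_label handling in label_binarize above; only numpy is
# assumed besides the function itself.
def _demo_label_binarize_pos_neg():
    import numpy as np
    # Multiclass input gives one column per class; negatives encoded as -1.
    Y = label_binarize([1, 6, 4], classes=[1, 4, 6], neg_label=-1, pos_label=1)
    expected = np.array([[1, -1, -1],
                         [-1, -1, 1],
                         [-1, 1, -1]])
    assert np.array_equal(Y, expected)
    return Y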
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
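# Illustrative sketch (not part of the public module): a hedged check that the
# sparse and dense branches of _inverse_binarize_multiclass above agree, each
# returning the class with the maximal score per row. Assumes numpy and
# scipy.sparse only.
def _demo_inverse_binarize_multiclass():
    import numpy as np
    import scipy.sparse as sparse
    scores = np.array([[0.1, 0.8, 0.1],
                       [0.6, 0.3, 0.1],
                       [0.2, 0.2, 0.6]])
    classes = np.array(['a', 'b', 'c'])
    dense = _inverse_binarize_multiclass(scores, classes)
    sparse_out = _inverse_binarize_multiclass(sparse.csr_matrix(scores), classes)
    assert list(dense) == list(sparse_out) == ['b', 'a', 'c']
    return dense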
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
return np.repeat(classes[0], len(y))
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> from sklearn.preprocessing import MultiLabelBinarizer
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
See also
--------
sklearn.preprocessing.OneHotEncoder : encode categorical integer features
using a one-hot aka one-of-K scheme.
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,
copy=False)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
check_is_fitted(self, 'classes_')
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self, 'classes_')
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
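# Illustrative sketch (not part of the public module): a hedged round-trip demo
# of the MultiLabelBinarizer defined above, showing that inverse_transform
# recovers the original label sets as tuples of sorted class labels.
def _demo_multilabel_binarizer_roundtrip():
    mlb = MultiLabelBinarizer()
    yt = mlb.fit_transform([{"action", "comedy"}, {"drama"}, {"action"}])
    assert list(mlb.classes_) == ["action", "comedy", "drama"]
    assert mlb.inverse_transform(yt) == [("action", "comedy"), ("drama",),
                                         ("action",)]
    return yt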
| bsd-3-clause |
vybstat/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
nelson-liu/scikit-learn | examples/covariance/plot_covariance_estimation.py | 99 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
  the asymptotically optimal regularization parameter (minimizing an MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
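# Hedged illustration (not part of the original example): the shrunk estimator
# compared below blends the empirical covariance with a scaled identity,
# roughly
#     Sigma(alpha) = (1 - alpha) * S + alpha * (trace(S) / p) * I,
# where S is the empirical covariance, p the number of features and alpha the
# shrinkage coefficient. The helper sketches that formula with plain numpy;
# its exact agreement with ShrunkCovariance(shrinkage=alpha).covariance_ is an
# assumption stated for illustration only.
def _manual_shrinkage(X, alpha):
    S = empirical_covariance(X)
    p = S.shape[0]
    mu = np.trace(S) / p
    return (1. - alpha) * S + alpha * mu * np.eye(p)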
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
jeffzheng1/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py | 12 | 12480 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_queue_runner as fqr
from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import queue_runner
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
return {self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]}
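# Hedged illustration (not part of this module's API): the feed functions above
# only use `placeholders` as feed-dict keys, so plain strings are enough to
# show the circular batching behaviour. This is a sketch for explanation, not
# how FeedingQueueRunner wires things up in practice.
def _example_array_feed_fn_wraparound():
  data = np.arange(5)
  feed_fn = _ArrayFeedFn(["index", "value"], data, batch_size=3)
  first = feed_fn()   # feeds rows 0, 1, 2
  second = feed_fn()  # feeds rows 3, 4 and wraps around to row 0
  assert second["index"] == [3, 4, 0]
  return first, second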
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
    self._max = len(list(ordered_dict_of_arrays.values())[0])
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
if self._epoch == self._num_epochs:
# trim this batch, so as not to overshoot the last epoch.
batch_end_inclusive = integer_indexes.index(self._epoch_end)
integer_indexes = integer_indexes[:(batch_end_inclusive+1)]
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
def enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given array or `DataFrame`. In
the case of a pandas `DataFrame`, the first enqueued `Tensor` corresponds to
the index of the `DataFrame`. For numpy arrays, the first enqueued `Tensor`
contains the row number.
Args:
    data: a numpy `ndarray` or pandas `DataFrame` that will be read into the
queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given array or `DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame` or a numpy `ndarray`.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [dtypes.as_dtype(col.dtype)
for col in data.values()]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [dtypes.as_dtype(dt)
for dt in [data.index.dtype] + list(data.dtypes)]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(capacity,
dtypes=types,
shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(get_feed_fn(placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr.FeedingQueueRunner(queue=queue,
enqueue_ops=enqueue_ops,
feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
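# Hedged usage sketch (not part of this module's API): roughly how the queue
# returned by enqueue_data above is meant to be consumed under the TF 1.x
# queue-runner model. Wrapped in a helper so nothing runs at import time; the
# session/coordinator wiring is an assumption made for illustration only.
def _example_enqueue_data_usage():
  import tensorflow as tf  # deferred import, assumed TF 1.x style API
  data = np.arange(20).reshape(10, 2).astype(np.float32)
  queue = enqueue_data(data, capacity=32, shuffle=False, num_epochs=1)
  # For numpy input each dequeued element is (row_index, row).
  index, row = queue.dequeue_many(4)
  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      return sess.run([index, row])
    finally:
      coord.request_stop()
      coord.join(threads)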
| apache-2.0 |
shubhamchopra/spark | python/pyspark/sql/tests.py | 2 | 144199 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
_have_pandas = False
try:
import pandas
_have_pandas = True
except:
# No Pandas, but that's okay, we'll skip those tests
pass
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.tests import QuietTest, ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
_have_arrow = False
try:
import pyarrow
_have_arrow = True
except:
# No Arrow, but that's okay, we'll skip those tests
pass
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
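# Illustrative sketch (not part of the test suite): a hedged check of the UDT
# serialize/deserialize round trip defined above, independent of any Spark
# session.
def _demo_example_point_udt_roundtrip():
    udt = ExamplePointUDT()
    point = ExamplePoint(1.0, 2.0)
    assert udt.serialize(point) == [1.0, 2.0]
    assert udt.deserialize([1.0, 2.0]) == point
    return point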
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_struct_field_type_name(self):
struct_field = StructField("a", IntegerType())
self.assertRaises(TypeError, struct_field.typeName)
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.spark = SparkSession(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_multiLine_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initalization(self):
# This is separate of UDFInitializationTests
# to avoid context initialization
# when udf is called
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import StringType
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_validate_column_types(self):
from pyspark.sql.functions import udf, to_json
from pyspark.sql.column import _to_java_column
self.assertTrue("Column" in _to_java_column("a").getClass().toString())
self.assertTrue("Column" in _to_java_column(u"a").getClass().toString())
self.assertTrue("Column" in _to_java_column(self.spark.range(1).id).getClass().toString())
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: _to_java_column(1))
class A():
pass
self.assertRaises(TypeError, lambda: _to_java_column(A()))
self.assertRaises(TypeError, lambda: _to_java_column([]))
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: udf(lambda x: x)(None))
self.assertRaises(TypeError, lambda: to_json(1))
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
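        # rows containing empty collections and null values should still yield a usable inferred schema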
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
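        # check_datatype round-trips a type through pickle and through the JVM's JSON representation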
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
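        # the UDF returns None for id == 0, so the first of the two rows taken should be None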
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
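        # arithmetic, comparison and boolean operators on Columns should all yield Column objects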
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
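        # every id divisible by 3 is replaced with null; with ignorenulls=True, first/last skip them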
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
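        # the column argument may be a single name, a list or a tuple of names; each call uses a 0.1 relative error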
for f in ["a", u"a"]:
aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr(u"a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_sampleby(self):
df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(10)]).toDF()
sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
self.assertTrue(sampled.count() == 3)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov(u"a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab(u"a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertTrue(row[1], 1)
self.assertTrue(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
            assert sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
from pyspark.sql.functions import col, lit
df = self.spark.createDataFrame([['nick']], schema=['name'])
self.assertRaisesRegexp(
TypeError,
"must be the same type",
lambda: df.select(col('name').substr(0, lit(1))))
if sys.version_info.major == 2:
self.assertRaises(
TypeError,
lambda: df.select(col('name').substr(long(0), long(1))))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
        # The value argument can be implicitly cast to the element type of the array.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
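        # schemas built incrementally with add() should equal schemas built from an explicit StructField list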
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
from pyspark.sql.types import StructType, StringType, StructField
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
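        # keyword arguments passed to start() should override the sink options set earlier on the
        # writer, so the fake checkpoint and output paths must never be created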
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
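        # a large Python int should be inferred as LongType and survive a Parquet round trip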
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
        # saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
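        # a timezone-aware datetime should be converted so it reads back equal to the naive local time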
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
        # add microseconds to utcnow (keeping year, month, day, hour, minute, second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
        # replace with subset given as a single column name (string), where a value actually changes
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
        # replace with subset given as a single column name (string), where no value changes
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
        # replace a list while value is not given (defaults to None)
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
self.assertTupleEqual(row, (None, 10, 80.0))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
        # should fail if subset is not a list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
        # should fail if to_replace and value have different lengths
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
        # should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
        # should fail if mixed-type replacements are provided
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
    # test for SPARK-10577 (broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_sample(self):
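        # sample() should validate its arguments eagerly, rejecting wrong types and a negative fraction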
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
        # a field type mismatch will cause an exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
        # flat schema values will be wrapped into a Row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
try:
self.spark.conf.set("spark.sql.crossJoin.enabled", "false")
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
self.spark.conf.set("spark.sql.crossJoin.enabled", "true")
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
finally:
# We should unset this. Otherwise, other tests are affected.
self.spark.conf.unset("spark.sql.crossJoin.enabled")
    # Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
        # Pyrolite version <= 4.9 could not serialize BinaryType with Python 3 (SPARK-17808).
        # The empty bytearray is a test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
        # This test needs to make sure that the Scala type selected is at least
        # as large as Python's types. This is necessary because Python's array
        # types depend on the C implementation on the machine, so there is no
        # machine-independent correspondence between Python's array types and
        # Scala types.
# See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
        # The size of C types changes with the implementation, so we need to
        # make sure that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
        # The keys in _array_type_mappings are a complete list of all supported types,
        # and types not in _array_type_mappings are considered unsupported.
        # `array.typecodes` is not available in Python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return number o columns which are
used for bucketing
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
@unittest.skipIf(not _have_pandas, "Pandas not installed")
def test_to_pandas(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())
data = [
(1, "foo", True, 3.0), (2, "foo", True, 5.0),
(3, "bar", False, -1.0), (4, "bar", False, 6.0),
]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
@unittest.skipIf(not _have_pandas, "Pandas not installed")
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
class HiveSparkSubmitTests(SparkSubmitTests):
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore url and checks if there is a derby dir created by
# Hive metastore. If this derby dir exists, HiveContext is using
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
    # We can't include this test in SQLTests because we would stop the class's SparkContext
    # and cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
sc.stop()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
            SparkContext._active_spark_context.stop()
    def test_udf_init_shouldnt_initialize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
        # Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(not _have_arrow, "Arrow not installed")
class ArrowTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
cls.spark.conf.set("spark.sql.execution.arrow.enable", "true")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True)])
cls.data = [("a", 1, 10, 0.2, 2.0),
("b", 2, 20, 0.4, 4.0),
("c", 3, 30, 0.8, 6.0)]
def assertFramesEqual(self, df_with_arrow, df_without):
msg = ("DataFrame from Arrow is not equal" +
("\n\nWith Arrow:\n%s\n%s" % (df_with_arrow, df_with_arrow.dtypes)) +
("\n\nWithout:\n%s\n%s" % (df_without, df_without.dtypes)))
self.assertTrue(df_without.equals(df_with_arrow), msg=msg)
def test_unsupported_datatype(self):
schema = StructType([StructField("dt", DateType(), True)])
df = self.spark.createDataFrame([(datetime.date(1970, 1, 1),)], schema=schema)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: df.toPandas())
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
self.spark.conf.set("spark.sql.execution.arrow.enable", "false")
pdf = df.toPandas()
self.spark.conf.set("spark.sql.execution.arrow.enable", "true")
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_pandas_round_trip(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
pdf = pd.DataFrame(data=data_dict)
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
@unittest.skipIf(not _have_pandas or not _have_arrow, "Pandas or Arrow not installed")
class VectorizedUDFTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
def test_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
bool_f = pandas_udf(f, BooleanType())
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_boolean(self):
from pyspark.sql.functions import pandas_udf, col
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
from pyspark.sql.functions import pandas_udf, col
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_zero_parameter(self):
from pyspark.sql.functions import pandas_udf
error_str = '0-parameter pandas_udfs.*not.*supported'
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, error_str):
pandas_udf(lambda: 1, LongType())
with self.assertRaisesRegexp(NotImplementedError, error_str):
@pandas_udf
def zero_no_type():
return 1
with self.assertRaisesRegexp(NotImplementedError, error_str):
@pandas_udf(LongType())
def zero_with_type():
return 1
def test_vectorized_udf_datatype_string(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_complex(self):
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_mix_udf(self):
from pyspark.sql.functions import pandas_udf, udf, col
df = self.spark.range(10)
row_by_row_udf = udf(lambda x: x, LongType())
pd_udf = pandas_udf(lambda x: x, LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Can not mix vectorized and non-vectorized UDFs'):
df.select(row_by_row_udf(col('id')), pd_udf(col('id'))).collect()
def test_vectorized_udf_chained(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: x * 1.0, StringType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Invalid.*type.*string'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_return_scalar(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*pandas_udf.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
| apache-2.0 |
camallen/aggregation | algorithms/blanks/serengeti3.py | 2 | 3635 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import urllib
import matplotlib.pyplot as plt
import cv2
from skimage.measure import structural_similarity as ssim
def mse(imageA, imageB):
    # taken from
    # http://www.pyimagesearch.com/2014/09/15/python-compare-two-images/
    # the 'Mean Squared Error' between the two images is the sum of the
    # squared differences between the two images, divided by the total
    # number of pixels;
    # NOTE: the two images must have the same dimensions
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
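# Illustrative check of mse() on two tiny synthetic arrays (not part of the
# Serengeti pipeline below; the _example_* names are hypothetical). With a
# single pixel differing by 1, the squared error 1.0 is averaged over 4 pixels.
_example_a = np.array([[0.0, 1.0], [2.0, 3.0]])
_example_b = np.array([[0.0, 1.0], [2.0, 4.0]])
assert mse(_example_a, _example_b) == 0.25
assert mse(_example_a, _example_a) == 0.0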
# the directory to store the movie preview clips in
image_directory = "/home/greg/Databases/serengeti/images/"
# connect to the mongodb server
client = pymongo.MongoClient()
db = client['serengeti_2015-02-22']
subjects = db["serengeti_subjects"]
false_positives = []
true_positives = []
all_files = []
reasons = []
for ii,s in enumerate(subjects.find({"tutorial":{"$ne":True},"coords":[-2.4672743413359295, 34.75278520232197]}).limit(100)):
# print s["coords"],s["created_at"]
reason = s["metadata"]["retire_reason"]
coords = s["coords"]
# print s["created_at"]
# print coords
# print s["metadata"]["timestamps"]
urls = s["location"]["standard"]
slash_indices = [i.rfind("/") for i in urls]
fnames = [str(i[j+1:]) for i,j in zip(urls,slash_indices)]
if len(fnames) == 1:
continue
for url,fname in zip(urls,fnames):
if not(os.path.isfile(image_directory+fname)):
urllib.urlretrieve(url, image_directory+fname)
all_files.append(fnames)
reasons.append(reason)
for subject_index in range(len(all_files)):
similarity = []
non_similarity = []
print subject_index
# print reasons[subject_index]
# print "---"
differences = []
for subject_index2 in range(subject_index+1,min(len(all_files),subject_index+7)):
per_image_difference = []
for fname1 in all_files[subject_index]:
for fname2 in all_files[subject_index2]:
# print image_directory+fname1
f1 = cv2.imread(image_directory+fname1)
# print image_directory+fname2
f2 = cv2.imread(image_directory+fname2)
f1 = cv2.cvtColor(f1,cv2.COLOR_BGR2GRAY)
f2 = cv2.cvtColor(f2,cv2.COLOR_BGR2GRAY)
per_image_difference.append(ssim(f1,f2))
differences.append(np.mean(per_image_difference))
# if reasons[subject_index] == reasons[subject_index2]:
# similarity.append(max(differences))
# else:
# non_similarity.append(max(differences))
# print min(similarity),max(similarity),np.mean(similarity)
# print min(non_similarity),max(non_similarity),np.mean(non_similarity)
if differences == []:
continue
if reasons[subject_index] == "blank":
false_positives.append(-max(differences))
else:
true_positives.append(-max(differences))
# create the ROC curve
alphas = true_positives[:]
alphas.extend(false_positives)
alphas.sort()
X = []
Y = []
for a in alphas:
X.append(len([x for x in false_positives if x >= a])/float(len(false_positives)))
Y.append(len([y for y in true_positives if y >= a])/float(len(true_positives)))
print len(false_positives)
print len(true_positives)
plt.plot(X,Y)
plt.plot([0,1],[0,1],"--",color="green")
plt.xlabel("False Positive Count")
plt.ylabel("True Positive Count")
plt.show() | apache-2.0 |
MridulS/sympy | examples/intermediate/mplot3d.py | 14 | 1261 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x,y,z) #seems to be a bug in matplotlib
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
| bsd-3-clause |
log0ymxm/pyLDAvis | pyLDAvis/graphlab.py | 6 | 2460 | """
pyLDAvis GraphLab
===============
Helper functions to visualize GraphLab Create's TopicModel (an implementation of LDA)
"""
from __future__ import absolute_import
import funcy as fp
import numpy as np
import pandas as pd
import graphlab as gl
import pyLDAvis
def _topics_as_df(topic_model):
tdf = topic_model['topics'].to_dataframe()
return pd.DataFrame(np.vstack(tdf['topic_probabilities'].values), index=tdf['vocabulary'])
def _sum_sarray_dicts(sarray):
counts_sf = gl.SFrame({'count_dicts': sarray}).stack('count_dicts').groupby(key_columns='X1',
operations={'count': gl.aggregate.SUM('X2')})
return counts_sf.unstack(column=['X1', 'count'])[0].values()[0]
def _extract_doc_data(docs):
doc_lengths = list(docs.apply(lambda d: np.array(d.values()).sum()))
term_freqs_dict = _sum_sarray_dicts(docs)
vocab = term_freqs_dict.keys()
term_freqs = term_freqs_dict.values()
return {'doc_lengths': doc_lengths, 'vocab': vocab, 'term_frequency': term_freqs}
def _extract_model_data(topic_model, docs, vocab):
doc_topic_dists = np.vstack(topic_model.predict(docs, output_type='probabilities'))
topics = _topics_as_df(topic_model)
topic_term_dists = topics.T[vocab].values
return {'topic_term_dists': topic_term_dists, 'doc_topic_dists': doc_topic_dists}
def _extract_data(topic_model, docs):
doc_data = _extract_doc_data(docs)
model_data = _extract_model_data(topic_model, docs, doc_data['vocab'])
return fp.merge(doc_data, model_data)
def prepare(topic_model, docs, **kargs):
"""Transforms the GraphLab TopicModel and related corpus data into
the data structures needed for the visualization.
Parameters
----------
topic_model : graphlab.toolkits.topic_model.topic_model.TopicModel
An already trained GraphLab topic model.
docs : SArray of dicts
The corpus in bag of word form, the same docs used to train the model.
**kwargs :
additional keyword arguments are passed through to :func:`pyldavis.prepare`.
Returns
-------
prepared_data : PreparedData
the data structures used in the visualization
Example
--------
For example usage please see this notebook:
http://nbviewer.ipython.org/github/bmabey/pyLDAvis/blob/master/notebooks/GraphLab.ipynb
"""
opts = fp.merge(_extract_data(topic_model, docs), kargs)
return pyLDAvis.prepare(**opts)
| bsd-3-clause |
henridwyer/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
hellokathy/coursera-compinvesting1-hw | HW3/marketsim.py | 2 | 2840 | ## Computational Investing I
## HW 3 - marketsum.py
##
## Author: alexcpsec
import pandas as pd
import pandas.io.parsers as pd_par
import numpy as np
import math
import copy
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
startCash = 1000000
orderFile = "orders_q2.csv"
valueFile = "values_q2.csv"
orderDF = pd_par.read_csv(orderFile, header=None)
# Getting the Symbols from the .csv file
ls_symbols = list(set(orderDF['X.4'].values))
# Need to sort the trades DF by increasing date
orderDF = orderDF.sort(['X.1', 'X.2', 'X.3'])
# Getting the start and end dates from the .csv file
df_lastrow = len(orderDF) - 1
dt_start = dt.datetime( orderDF.get_value(0, 'X.1'), orderDF.get_value(0, 'X.2'), orderDF.get_value(0, 'X.3'))
dt_end = dt.datetime( orderDF.get_value(df_lastrow, 'X.1'), orderDF.get_value(df_lastrow, 'X.2'), orderDF.get_value(df_lastrow, 'X.3') + 1 )
# Getting market data
dataobj = da.DataAccess('Yahoo')
ls_keys = ['close', 'actual_close']
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
# Adding CASH to our symbols and creating our trades table
ls_symbols.append("_CASH")
trades_data = pd.DataFrame(index=list(ldt_timestamps), columns=list(ls_symbols))
curr_cash = startCash
trades_data["_CASH"][ldt_timestamps[0]] = startCash
curr_stocks = dict()
for sym in ls_symbols:
curr_stocks[sym] = 0
trades_data[sym][ldt_timestamps[0]] = 0
for row in orderDF.iterrows():
row_data = row[1]
curr_date = dt.datetime(row_data['X.1'], row_data['X.2'], row_data['X.3'], 16 )
sym = row_data['X.4']
stock_value = d_data['close'][sym][curr_date]
stock_amount = row_data['X.6']
if row_data['X.5'] == "Buy":
curr_cash = curr_cash - (stock_value * stock_amount)
trades_data["_CASH"][curr_date] = curr_cash
curr_stocks[sym] = curr_stocks[sym] + stock_amount
trades_data[sym][curr_date] = curr_stocks[sym]
else:
curr_cash = curr_cash + (stock_value * stock_amount)
trades_data["_CASH"][curr_date] = curr_cash
curr_stocks[sym] = curr_stocks[sym] - stock_amount
trades_data[sym][curr_date] = curr_stocks[sym]
trades_data = trades_data.fillna(method = "pad")
value_data = pd.DataFrame(index=list(ldt_timestamps), columns=list("V"))
value_data = value_data.fillna(0)
for day in ldt_timestamps:
value = 0
for sym in ls_symbols:
if sym == "_CASH":
value = value + trades_data[sym][day]
else:
value = value + trades_data[sym][day] * d_data['close'][sym][day]
value_data["V"][day] = value
file_out = open( valueFile, "w" )
for row in value_data.iterrows():
file_out.writelines(str(row[0].strftime('%Y,%m,%d')) + ", " + str(row[1]["V"].round()) + "\n" )
file_out.close()
| mit |
roxyboy/bokeh | examples/plotting/file/burtin.py | 43 | 4765 | from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_file
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
x = np.zeros(len(df))
y = np.zeros(len(df))
output_file("burtin.html", title="burtin.py example")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color="black",
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and lables
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
| bsd-3-clause |
andreweskeclarke/reinforcement_learning | src/runner.py | 1 | 2951 | import numpy as np
import matplotlib.patches as mpatches
import yaml
from math import floor
import random
import matplotlib.pyplot as plt
import argparse
from bandits import NBandits, Bandit
from agents import *
class Environment:
def __init__(self, bandits):
self.bandits = bandits
def possible_actions(self):
return range(0, len(self.bandits))
def take(self, action):
was_action_optimal = self.bandits.is_action_optimal(action)
return self.bandits.take(action), was_action_optimal
class Sim:
def __init__(self, n_runs, n_plays, bandits_creator, agent_creator):
self.n_runs = float(n_runs)
self.n_plays = n_plays
self.create_bandits = bandits_creator
self.create_agent = agent_creator
def run(self):
self.optimal_choice_rates = [0] * self.n_plays
for run in range(0,int(self.n_runs)):
bandits = self.create_bandits(None)
env = Environment(bandits)
agent = self.create_agent(env.possible_actions())
for i in range(0,self.n_plays):
action = agent.choose()
reward, was_optimal = env.take(action)
agent.update(reward)
if was_optimal:
self.optimal_choice_rates[i] += (1/self.n_runs)
def runningMean(self, x, N):
return np.convolve(x, np.ones((N,))/N)[(N-1):]
def plot(self, color, label):
return plt.plot(range(0,self.n_plays),
self.runningMean(self.optimal_choice_rates, 25),
color=color,
label=label)
def main():
print("Start sim")
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('settings', help='Location of the settings yaml file for the desired simulation')
args = parser.parse_args()
with open(args.settings, 'r') as stream:
settings = yaml.load(stream)
n_bandits = settings['n_bandits']
n_runs = settings['n_runs']
n_plays_per_run = settings['n_plays_per_run']
patches = []
for experiment in settings['experiments']:
print(experiment)
simulation = Sim(n_runs, n_plays_per_run,
lambda _: eval(experiment['env_class'])(n_bandits, options=experiment['env_options']),
lambda actions: eval(experiment['agent_class'])(actions, options=experiment['options']))
simulation.run()
simulation.plot(experiment['color'], experiment['label'])
patches.append(mpatches.Patch(color=experiment['color'], label=experiment['label']))
plt.axis([0,n_plays_per_run,0,1.05])
plt.title("NBandits Reinforcement")
plt.plot([0,n_plays_per_run],[0.9, 0.9], '--', color='g')
plt.plot([0,n_plays_per_run],[0.95, 0.95], '--', color='b')
plt.legend(handles=patches)
plt.savefig('images/n_bandits_solutions.png')
# plt.show()
if __name__ == "__main__":
main()
| mit |
Starkiller4011/tsat | tsat/lc_var/lcvar.py | 1 | 4590 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
#####################################
# ╔╗ ┬ ┬ ┬┌─┐ ╔╦╗┌─┐┌┬┐ #
# ╠╩╗│ │ │├┤ ║║│ │ │ #
# ╚═╝┴─┘└─┘└─┘ ═╩╝└─┘ ┴ #
# ╔═╗┌─┐┌─┐┌┬┐┬ ┬┌─┐┬─┐┌─┐ #
# ╚═╗│ │├┤ │ │││├─┤├┬┘├┤ #
# ╚═╝└─┘└ ┴ └┴┘┴ ┴┴└─└─┘ #
#####################################
Author: Derek Blue
"""
# Future Imports
from __future__ import division, print_function
# Module Imports
import sys
import os
os.system('cls||echo -e \\\\033c')
print("Importing required modules...\n")
# Numpy
try:
import numpy as np
except ImportError:
print("Module 'numpy' required but not installed.")
print("Try 'pip install numpy' from a terminal.")
sys.exit()
else:
print("numpy: success")
# Pandas
try:
import pandas as pd
except ImportError:
print("Module 'pandas' required but not installed.")
print("Try 'pip install pandas' from a terminal.")
sys.exit()
else:
print("pandas: success")
# Scipy
try:
import scipy as sp
except ImportError:
print("Module 'scipy' required but not installed.")
print("Try 'pip install scipy' from a terminal.")
sys.exit()
else:
print("scipy: success")
# dfgui
try:
from dfgui import show as pdui
except ImportError:
print("Module 'dfgui' required but not installed. Defaulting to terminal display.")
print("Dataframes will only be displeyed as text in the terminal.")
print("For a GUI visualizer for dataframes install dfgui via the following:")
print("git clone https://github.com/bluenote10/PandasDataFrameGUI.git")
print("cd dfgui")
print("pip install -e .")
print("./demo.py")
print("Further documentation can be found here:")
print("https://github.com/bluenote10/PandasDataFrameGUI")
PDUI_PRESENT = False
else:
print("dfgui: success")
PDUI_PRESENT = True
# matplotlib
try:
import matplotlib.pyplot as plt
except ImportError:
print("Module 'matplotlib.pyplot' required but not installed.")
print("Try 'pip install matplotlib' from a terminal.")
sys.exit()
else:
print("matplotlib: success")
# tqdm (progress bars used by var_lc below)
try:
    from tqdm import tqdm
except ImportError:
    print("Module 'tqdm' required but not installed.")
    print("Try 'pip install tqdm' from a terminal.")
    sys.exit()
else:
    print("tqdm: success")
# End imports
print("\nModules loaded, setting verbosity...\n")
def load_sftable(root, filename, verbose):
'''
loads the structure function table
'''
file_path = os.path.join(root, filename)
try:
if verbose:
print('******\nTrying to open data file using tab as delimiter...')
test = np.genfromtxt(file_path, delimiter='\t',
skip_header=0, names=True)
if verbose:
print(
'******\nSuccessfully opened data file using tab as delimiter.\n******\n')
table = pd.read_table(file_path, sep='\t')
except ValueError:
if verbose:
print('******\nData file does not use tab as delimiter, trying comma...')
try:
test = np.genfromtxt(file_path, delimiter=',',
skip_header=0, names=True)
except ValueError:
if verbose:
print(
'******\nData file is not in supported format, exiting...\n******\n')
sys.exit()
if verbose:
print(
'******\nSuccessfully opened data file using comma as delimiter.\n******\n')
table = pd.read_table(file_path, sep=',')
return table
def lc_var(f_col, e_col):
'''
Return light curve variance
'''
    mu = np.mean(f_col)
    # Vectorised form of the error-corrected variance computed by var_lc() below.
    return np.mean(np.square(np.asarray(f_col) - mu - np.asarray(e_col)))
current_table = load_sftable('/home/dblue/Documents/School/Summer2017/PY-SF/astroSF/DATA/TAB/LC',
'mkn335_V.csv', False)
t_col = np.array(current_table['Time'].tolist(), dtype=np.float)
f_col = np.array(current_table['Flux'].tolist(), dtype=np.float)
e_col = np.array(current_table['+-'].tolist(), dtype=np.float)
def var_lc(flux_col, error_col=None):
if error_col is None:
error_col = []
for i in range(0, len(flux_col)):
error_col.append(0)
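    # Two-pass estimate: first the mean flux, then the average of the squared
    # deviations from that mean, with each deviation reduced by the point's
    # error estimate before squaring.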
fav = 0
n = 0
for f in tqdm(flux_col, ascii=True, desc='var_pt1'):
fav += f
n += 1
mu = fav / n
n = 0
var = 0
for i, f in tqdm(enumerate(flux_col), ascii=True, desc='var_pt2'):
tmp = np.square(f - mu - error_col[i])
var += tmp
n += 1
variance = var / n
return(variance) | mit |
brightchen/h2o-3 | h2o-docs/src/api/data-science-example-1/example-native-pandas-scikit.py | 22 | 2796 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import preprocessing
# <codecell>
air_raw = DataFrame.from_csv("allyears_tiny.csv", index_col = False)
print(air_raw.head())
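# Add a uniform random column; it is used further down to split the train/test sets.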
air_raw['RandNum'] = Series(np.random.uniform(size = len(air_raw['Origin'])))
print(air_raw.head())
# <codecell>
air_mapped = DataFrame()
air_mapped['RandNum'] = air_raw['RandNum']
air_mapped['IsDepDelayed'] = air_raw['IsDepDelayed']
air_mapped['IsDepDelayedInt'] = air_mapped.apply(lambda row:
1 if row['IsDepDelayed'] == 'YES' else 0,
axis=1)
del air_mapped['IsDepDelayed']
print(air_mapped.shape)
lb_origin = sklearn.preprocessing.LabelBinarizer()
lb_origin.fit(air_raw['Origin'])
tmp_origin = lb_origin.transform(air_raw['Origin'])
tmp_origin_df = DataFrame(tmp_origin)
print(tmp_origin_df.shape)
lb_dest = sklearn.preprocessing.LabelBinarizer()
lb_dest.fit(air_raw['Dest'])
tmp_dest = lb_dest.transform(air_raw['Dest'])
tmp_dest_df = DataFrame(tmp_dest)
print(tmp_dest_df.shape)
lb_uniquecarrier = sklearn.preprocessing.LabelBinarizer()
lb_uniquecarrier.fit(air_raw['UniqueCarrier'])
tmp_uniquecarrier = lb_uniquecarrier.transform(air_raw['UniqueCarrier'])
tmp_uniquecarrier_df = DataFrame(tmp_uniquecarrier)
print(tmp_uniquecarrier_df.shape)
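# Assemble the feature matrix: the one-hot blocks for Origin, Dest and
# UniqueCarrier plus the raw numeric/date columns.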
air_mapped = pd.concat([
air_mapped,
tmp_origin_df,
tmp_dest_df,
air_raw['Distance'],
tmp_uniquecarrier_df,
air_raw['Month'],
air_raw['DayofMonth'],
air_raw['DayOfWeek'],
],
axis=1)
print(air_mapped.shape)
air_mapped
air = air_mapped
# <codecell>
air_train = air.ix[air['RandNum'] <= 0.8]
# air_valid = air.ix[(air['RandNum'] > 0.8) & (air['RandNum'] <= 0.9)]
air_test = air.ix[air['RandNum'] > 0.9]
print(air_train.shape)
print(air_test.shape)
# <codecell>
X_train = air_train.copy(deep=True)
del X_train['RandNum']
del X_train['IsDepDelayedInt']
print(list(X_train.columns.values))
print(X_train.shape)
y_train = air_train['IsDepDelayedInt']
print(y_train.shape)
# <codecell>
clf = GradientBoostingClassifier(n_estimators = 10, max_depth = 3, learning_rate = 0.01)
clf.fit(X_train, y_train)
# <codecell>
X_test = air_test.copy(deep=True)
del X_test['RandNum']
del X_test['IsDepDelayedInt']
print(list(X_test.columns.values))
print(X_test.shape)
print("")
print("--- PREDICTIONS ---")
print("")
pred = clf.predict(X_test)
print(pred)
| apache-2.0 |
CameronTEllis/brainiak | examples/searchlight/genre_searchlight_example.py | 7 | 3829 | # The following code is designed to perform a searchlight at every voxel in the brain looking at the difference in pattern similarity between musical genres (i.e. classical and jazz). In the study where the data was obtained, subjects were required to listen to a set of 16 songs twice (two runs) in an fMRI scanner. The 16 songs consisted of 8 jazz songs and 8 classical songs. The goal of this searchlight is to find voxels that seem to represent distinct information about these different musical genres. Presumably, these voxels would be found in the auditory cortex which happens to be the most organized system in the brain for processing sound information.
import numpy as np
import time
from mpi4py import MPI
from nilearn.image import load_img
import sys
from brainiak.searchlight.searchlight import Searchlight
from scipy import stats
from scipy.sparse import random
import os
# MPI variables
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
# Generate random data
if rank == 0:
np.random.seed(0)
data1_rand = np.random.rand(91,109,91,16)
data2_rand = np.random.rand(91,109,91,16)
classical = np.random.rand(2600)
jazz = np.random.rand(2600)
d1_reshape = np.reshape(data1_rand,(91*109*91,16))
d2_reshape = np.reshape(data2_rand,(91*109*91,16))
a1 = load_img('a1plus_2mm.nii.gz')
a1_vec = np.reshape(a1.get_data(),(91*109*91))
a1_idx = np.nonzero(a1_vec)
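    # Inject the same genre-specific patterns into the A1 voxels of both runs, so the
    # searchlight should only find a within- vs between-genre difference near auditory cortex.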
for i in range(8):
d1_reshape[a1_idx[0],i] += classical
d1_reshape[a1_idx[0],i+8] += jazz
d2_reshape[a1_idx[0],i] += classical
d2_reshape[a1_idx[0],i+8] += jazz
data1 = np.reshape(d1_reshape,(91,109,91,16))
data2 = np.reshape(d2_reshape,(91,109,91,16))
# Flatten data, then zscore data, then reshape data back into MNI coordinate space
data1 = stats.zscore(np.reshape(data1,(91*109*91,16)))
data1 = np.reshape(data1,(91,109,91,16))
data2 = stats.zscore(np.reshape(data2,(91*109*91,16)))
data2 = np.reshape(data2,(91,109,91,16))
else:
data1 = None
data2 = None
# Load mask
mask_img = load_img('MNI152_T1_2mm_brain_mask.nii')
mask_img = mask_img.get_data()
# Define function that takes the difference between within vs. between genre comparisons
def corr2_coeff(AB,msk,myrad,bcast_var):
if not np.all(msk):
return None
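    # AB holds the two runs; the 32x32 song correlation matrix is sliced to the
    # 16x16 block of run-2 vs run-1 songs, with indices 0-7 classical and 8-15 jazz.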
A,B = (AB[0], AB[1])
A = A.reshape((-1,A.shape[-1]))
B = B.reshape((-1,B.shape[-1]))
corrAB = np.corrcoef(A.T,B.T)[16:,:16]
classical_within = np.mean(corrAB[0:8,0:8])
jazz_within = np.mean(corrAB[8:16,8:16])
classJazz_between = np.mean(corrAB[8:16,0:8])
jazzClass_between = np.mean(corrAB[0:8,8:16])
within_genre = np.mean([classical_within,jazz_within])
between_genre = np.mean([classJazz_between,jazzClass_between])
diff = within_genre - between_genre
return diff
comm.Barrier()
begin_time = time.time()
comm.Barrier()
# Create and run searchlight
sl = Searchlight(sl_rad=1,max_blk_edge=5)
sl.distribute([data1,data2],mask_img)
sl.broadcast(None)
global_outputs = sl.run_searchlight(corr2_coeff)
comm.Barrier()
end_time = time.time()
comm.Barrier()
# Plot searchlight results
if rank == 0:
print('Searchlight Done: ', end_time - begin_time)
maxval = np.max(global_outputs[np.not_equal(global_outputs,None)])
minval = np.min(global_outputs[np.not_equal(global_outputs,None)])
global_outputs = np.array(global_outputs, dtype=np.float)
print(global_outputs)
# Save searchlight images
out_dir = "searchlight_images"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
import matplotlib.pyplot as plt
for (cnt, img) in enumerate(global_outputs):
plt.imshow(img,vmin=minval,vmax=maxval)
plt.colorbar()
plt.savefig('searchlight_images/' + 'img' + str(cnt) + '.png')
plt.clf()
| apache-2.0 |
jsamoocha/pysweat | tests/test_transform_similarities.py | 1 | 2461 | import unittest
import pandas as pd
from pysweat.transformation.similarities import cosine_similarity, cosine_to_deviation
class SimilarityTransformationTest(unittest.TestCase):
def test_cosine_similarity_similar_vectors_2d(self):
"""Should return 1 for vectors pointing in same directions"""
v1 = (1, 1)
v2 = (2, 2)
self.assertAlmostEqual(cosine_similarity(v1, v2), 1, 9)
def test_cosine_similarity_orthogonal_vectors_2d(self):
"""Should return 0 for orthogonal vectors"""
v1 = (1, 1)
v2 = (2, -2)
self.assertAlmostEqual(cosine_similarity(v1, v2), 0, 9)
def test_cosine_similarity_opposite_vectors_2d(self):
"""Should return -1 for vectors pointing in opposite directions"""
v1 = (1, 1)
v2 = (-2, -2)
self.assertAlmostEqual(cosine_similarity(v1, v2), -1, 9)
def test_cosine_similarity_floating_point_rounding_error_positive(self):
"""Should return 1 as maximum in case of floating point rounding errors"""
v1 = (0.0000015006504264572506635033732891315594, 0.0000006050474740115774352489097509533167)
v2 = (0.0000015006504264503117695994660607539117, 0.0000006050474739005551327863940969109535)
self.assertTrue(cosine_similarity(v1, v2) <= 1)
def test_cosine_similarity_floating_point_rounding_error_negative(self):
"""Should return -1 as minimum in case of floating point rounding errors"""
v1 = (0.0000015006504264572506635033732891315594, 0.0000006050474740115774352489097509533167)
v2 = (-0.0000015006504264503117695994660607539117, -0.0000006050474739005551327863940969109535)
self.assertTrue(cosine_similarity(v1, v2) >= -1)
def test_cosine_similarity_similar_vectors_3d_lists(self):
"""Should compute cosine similarity regardless vector representation or dimension"""
v1 = [1, 1, 1]
v2 = [2, 2, 2]
self.assertAlmostEqual(cosine_similarity(v1, v2), 1, 9)
def test_cosine_to_deviation(self):
"""Should return normalized angle between vectors, given a cosine similarity"""
test_df = pd.DataFrame({'cos': [1, 0, -1, 0.5]})
deviations = cosine_to_deviation(test_df).deviation.values
self.assertAlmostEqual(deviations[0], 0, 9)
self.assertAlmostEqual(deviations[1], 0.5, 9)
self.assertAlmostEqual(deviations[2], 1, 9)
self.assertAlmostEqual(deviations[3], 0.33333, 5)
| apache-2.0 |
apache/spark | python/pyspark/pandas/spark/accessors.py | 11 | 42801 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Spark related features. Usually, the features here are missing in pandas
but Spark has it.
"""
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Callable, Generic, List, Optional, Union, cast
from pyspark import StorageLevel
from pyspark.sql import Column, DataFrame as SparkDataFrame
from pyspark.sql.types import DataType, StructType
from pyspark.pandas._typing import IndexOpsLike
from pyspark.pandas.internal import InternalField
if TYPE_CHECKING:
from pyspark.sql._typing import OptionalPrimitiveType # noqa: F401 (SPARK-34943)
from pyspark._typing import PrimitiveType # noqa: F401 (SPARK-34943)
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
from pyspark.pandas.frame import CachedDataFrame # noqa: F401 (SPARK-34943)
class SparkIndexOpsMethods(Generic[IndexOpsLike], metaclass=ABCMeta):
"""Spark related features. Usually, the features here are missing in pandas
but Spark has it."""
def __init__(self, data: IndexOpsLike):
self._data = data
@property
def data_type(self) -> DataType:
"""Returns the data type as defined by Spark, as a Spark DataType object."""
return self._data._internal.spark_type_for(self._data._column_label)
@property
def nullable(self) -> bool:
"""Returns the nullability as defined by Spark."""
return self._data._internal.spark_column_nullable_for(self._data._column_label)
@property
def column(self) -> Column:
"""
Spark Column object representing the Series/Index.
        .. note:: This Spark Column object is strictly bound to the base DataFrame that
            the Series/Index was derived from.
"""
return self._data._internal.spark_column_for(self._data._column_label)
def transform(self, func: Callable[[Column], Column]) -> IndexOpsLike:
"""
Applies a function that takes and returns a Spark column. It allows to natively
apply a Spark function and column APIs with the Spark column internally used
        in Series or Index. The output length of the Spark column should be the same as the input's.
        .. note:: It requires the input and output to have the same length; therefore,
            the aggregate Spark functions such as count do not work.
Parameters
----------
func : function
Function to use for transforming the data by using Spark columns.
Returns
-------
Series or Index
Raises
------
ValueError : If the output from the function is not a Spark column.
Examples
--------
>>> from pyspark.sql.functions import log
>>> df = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
>>> df
a b
0 1 4
1 2 5
2 3 6
>>> df.a.spark.transform(lambda c: log(c))
0 0.000000
1 0.693147
2 1.098612
Name: a, dtype: float64
>>> df.index.spark.transform(lambda c: c + 10)
Int64Index([10, 11, 12], dtype='int64')
>>> df.a.spark.transform(lambda c: c + df.b.spark.column)
0 5
1 7
2 9
Name: a, dtype: int64
"""
from pyspark.pandas import MultiIndex
if isinstance(self._data, MultiIndex):
raise NotImplementedError("MultiIndex does not support spark.transform yet.")
output = func(self._data.spark.column)
if not isinstance(output, Column):
raise ValueError(
"The output of the function [%s] should be of a "
"pyspark.sql.Column; however, got [%s]." % (func, type(output))
)
# Trigger the resolution so it throws an exception if anything does wrong
# within the function, for example,
# `df1.a.spark.transform(lambda _: F.col("non-existent"))`.
field = InternalField.from_struct_field(
self._data._internal.spark_frame.select(output).schema.fields[0]
)
return self._data._with_new_scol(scol=output, field=field)
@property
@abstractmethod
def analyzed(self) -> IndexOpsLike:
pass
class SparkSeriesMethods(SparkIndexOpsMethods["ps.Series"]):
def apply(self, func: Callable[[Column], Column]) -> "ps.Series":
"""
Applies a function that takes and returns a Spark column. It allows to natively
apply a Spark function and column APIs with the Spark column internally used
in Series or Index.
        .. note:: It forces the index to be lost and the result ends up using the
            default index. It is preferred to use :meth:`Series.spark.transform` or
            :meth:`DataFrame.spark.apply` with `index_col` specified.
        .. note:: It does not require the input and output to have the same length.
            However, it internally creates a new DataFrame, which requires
            `compute.ops_on_diff_frames` to be set even for operations with the
            same-origin DataFrame and is therefore expensive, whereas
            :meth:`Series.spark.transform` does not require it.
Parameters
----------
func : function
Function to apply the function against the data by using Spark columns.
Returns
-------
Series
Raises
------
ValueError : If the output from the function is not a Spark column.
Examples
--------
>>> from pyspark import pandas as ps
>>> from pyspark.sql.functions import count, lit
>>> df = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
>>> df
a b
0 1 4
1 2 5
2 3 6
>>> df.a.spark.apply(lambda c: count(c))
0 3
Name: a, dtype: int64
>>> df.a.spark.apply(lambda c: c + df.b.spark.column)
0 5
1 7
2 9
Name: a, dtype: int64
"""
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.internal import HIDDEN_COLUMNS
output = func(self._data.spark.column)
if not isinstance(output, Column):
raise ValueError(
"The output of the function [%s] should be of a "
"pyspark.sql.Column; however, got [%s]." % (func, type(output))
)
assert isinstance(self._data, Series)
sdf = self._data._internal.spark_frame.drop(*HIDDEN_COLUMNS).select(output)
# Lose index.
return first_series(DataFrame(sdf)).rename(self._data.name)
@property
def analyzed(self) -> "ps.Series":
"""
Returns a new Series with the analyzed Spark DataFrame.
After multiple operations, the underlying Spark plan could grow huge
and make the Spark planner take a long time to finish the planning.
This function is for the workaround to avoid it.
.. note:: After analyzed, operations between the analyzed Series and the original one
will **NOT** work without setting a config `compute.ops_on_diff_frames` to `True`.
Returns
-------
Series
Examples
--------
>>> ser = ps.Series([1, 2, 3])
>>> ser
0 1
1 2
2 3
dtype: int64
The analyzed one should return the same value.
>>> ser.spark.analyzed
0 1
1 2
2 3
dtype: int64
However, it won't work with the same anchor Series.
>>> ser + ser.spark.analyzed
Traceback (most recent call last):
...
ValueError: ... enable 'compute.ops_on_diff_frames' option.
>>> with ps.option_context('compute.ops_on_diff_frames', True):
... (ser + ser.spark.analyzed).sort_index()
0 2
1 4
2 6
dtype: int64
"""
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import first_series
return first_series(DataFrame(self._data._internal.resolved_copy))
class SparkIndexMethods(SparkIndexOpsMethods["ps.Index"]):
@property
def analyzed(self) -> "ps.Index":
"""
Returns a new Index with the analyzed Spark DataFrame.
After multiple operations, the underlying Spark plan could grow huge
and make the Spark planner take a long time to finish the planning.
This function is for the workaround to avoid it.
.. note:: After analyzed, operations between the analyzed Series and the original one
will **NOT** work without setting a config `compute.ops_on_diff_frames` to `True`.
Returns
-------
Index
Examples
--------
>>> idx = ps.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
The analyzed one should return the same value.
>>> idx.spark.analyzed
Int64Index([1, 2, 3], dtype='int64')
However, it won't work with the same anchor Index.
>>> idx + idx.spark.analyzed
Traceback (most recent call last):
...
ValueError: ... enable 'compute.ops_on_diff_frames' option.
>>> with ps.option_context('compute.ops_on_diff_frames', True):
... (idx + idx.spark.analyzed).sort_values()
Int64Index([2, 4, 6], dtype='int64')
"""
from pyspark.pandas.frame import DataFrame
return DataFrame(self._data._internal.resolved_copy).index
class SparkFrameMethods(object):
"""Spark related features. Usually, the features here are missing in pandas
but Spark has it."""
def __init__(self, frame: "ps.DataFrame"):
self._psdf = frame
def schema(self, index_col: Optional[Union[str, List[str]]] = None) -> StructType:
"""
Returns the underlying Spark schema.
Returns
-------
pyspark.sql.types.StructType
The underlying Spark schema.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
Examples
--------
>>> df = ps.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.spark.schema().simpleString()
'struct<a:string,b:bigint,c:tinyint,d:double,e:boolean,f:timestamp>'
>>> df.spark.schema(index_col='index').simpleString()
'struct<index:bigint,a:string,b:bigint,c:tinyint,d:double,e:boolean,f:timestamp>'
"""
return self.frame(index_col).schema
def print_schema(self, index_col: Optional[Union[str, List[str]]] = None) -> None:
"""
Prints out the underlying Spark schema in the tree format.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
Returns
-------
None
Examples
--------
>>> df = ps.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.spark.print_schema() # doctest: +NORMALIZE_WHITESPACE
root
|-- a: string (nullable = false)
|-- b: long (nullable = false)
|-- c: byte (nullable = false)
|-- d: double (nullable = false)
|-- e: boolean (nullable = false)
|-- f: timestamp (nullable = false)
>>> df.spark.print_schema(index_col='index') # doctest: +NORMALIZE_WHITESPACE
root
|-- index: long (nullable = false)
|-- a: string (nullable = false)
|-- b: long (nullable = false)
|-- c: byte (nullable = false)
|-- d: double (nullable = false)
|-- e: boolean (nullable = false)
|-- f: timestamp (nullable = false)
"""
self.frame(index_col).printSchema()
def frame(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:
"""
Return the current DataFrame as a Spark DataFrame. :meth:`DataFrame.spark.frame` is an
alias of :meth:`DataFrame.to_spark`.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
See Also
--------
DataFrame.to_spark
DataFrame.to_pandas_on_spark
DataFrame.spark.frame
Examples
--------
By default, this method loses the index as below.
>>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df.to_spark().show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+
| a| b| c|
+---+---+---+
| 1| 4| 7|
| 2| 5| 8|
| 3| 6| 9|
+---+---+---+
>>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df.spark.frame().show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+
| a| b| c|
+---+---+---+
| 1| 4| 7|
| 2| 5| 8|
| 3| 6| 9|
+---+---+---+
If `index_col` is set, it keeps the index column as specified.
>>> df.to_spark(index_col="index").show() # doctest: +NORMALIZE_WHITESPACE
+-----+---+---+---+
|index| a| b| c|
+-----+---+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-----+---+---+---+
Keeping index column is useful when you want to call some Spark APIs and
convert it back to pandas-on-Spark DataFrame without creating a default index, which
can affect performance.
>>> spark_df = df.to_spark(index_col="index")
>>> spark_df = spark_df.filter("a == 2")
>>> spark_df.to_pandas_on_spark(index_col="index") # doctest: +NORMALIZE_WHITESPACE
a b c
index
1 2 5 8
In case of multi-index, specify a list to `index_col`.
>>> new_df = df.set_index("a", append=True)
>>> new_spark_df = new_df.to_spark(index_col=["index_1", "index_2"])
>>> new_spark_df.show() # doctest: +NORMALIZE_WHITESPACE
+-------+-------+---+---+
|index_1|index_2| b| c|
+-------+-------+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-------+-------+---+---+
Likewise, can be converted to back to pandas-on-Spark DataFrame.
>>> new_spark_df.to_pandas_on_spark(
... index_col=["index_1", "index_2"]) # doctest: +NORMALIZE_WHITESPACE
b c
index_1 index_2
0 1 4 7
1 2 5 8
2 3 6 9
"""
from pyspark.pandas.utils import name_like_string
psdf = self._psdf
data_column_names = []
data_columns = []
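        # Rename each data column to the string form of its column label (or its
        # position when the label is None) so the Spark column names match
        # pandas-on-Spark's.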
for i, (label, spark_column, column_name) in enumerate(
zip(
psdf._internal.column_labels,
psdf._internal.data_spark_columns,
psdf._internal.data_spark_column_names,
)
):
name = str(i) if label is None else name_like_string(label)
data_column_names.append(name)
if column_name != name:
spark_column = spark_column.alias(name)
data_columns.append(spark_column)
if index_col is None:
return psdf._internal.spark_frame.select(data_columns)
else:
if isinstance(index_col, str):
index_col = [index_col]
old_index_scols = psdf._internal.index_spark_columns
if len(index_col) != len(old_index_scols):
raise ValueError(
"length of index columns is %s; however, the length of the given "
"'index_col' is %s." % (len(old_index_scols), len(index_col))
)
if any(col in data_column_names for col in index_col):
raise ValueError("'index_col' cannot be overlapped with other columns.")
new_index_scols = [
index_scol.alias(col) for index_scol, col in zip(old_index_scols, index_col)
]
return psdf._internal.spark_frame.select(new_index_scols + data_columns)
def cache(self) -> "CachedDataFrame":
"""
Yields and caches the current DataFrame.
The pandas-on-Spark DataFrame is yielded as a protected resource and its corresponding
        data is cached which gets uncached after execution goes out of the context.
If you want to specify the StorageLevel manually, use :meth:`DataFrame.spark.persist`
See Also
--------
DataFrame.spark.persist
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.spark.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
dtype: int64
>>> df = df.spark.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use `unpersist` function
>>> df.spark.unpersist()
"""
from pyspark.pandas.frame import CachedDataFrame
self._psdf._update_internal_frame(
self._psdf._internal.resolved_copy, requires_same_anchor=False
)
return CachedDataFrame(self._psdf._internal)
def persist(
self, storage_level: StorageLevel = StorageLevel.MEMORY_AND_DISK
) -> "CachedDataFrame":
"""
Yields and caches the current DataFrame with a specific StorageLevel.
        If a StorageLevel is not given, the `MEMORY_AND_DISK` level is used by default, as in PySpark.
The pandas-on-Spark DataFrame is yielded as a protected resource and its corresponding
        data is cached which gets uncached after execution goes out of the context.
See Also
--------
DataFrame.spark.cache
Examples
--------
>>> import pyspark
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
Set the StorageLevel to `MEMORY_ONLY`.
>>> with df.spark.persist(pyspark.StorageLevel.MEMORY_ONLY) as cached_df:
... print(cached_df.spark.storage_level)
... print(cached_df.count())
...
Memory Serialized 1x Replicated
dogs 4
cats 4
dtype: int64
Set the StorageLevel to `DISK_ONLY`.
>>> with df.spark.persist(pyspark.StorageLevel.DISK_ONLY) as cached_df:
... print(cached_df.spark.storage_level)
... print(cached_df.count())
...
Disk Serialized 1x Replicated
dogs 4
cats 4
dtype: int64
If a StorageLevel is not given, it uses `MEMORY_AND_DISK` by default.
>>> with df.spark.persist() as cached_df:
... print(cached_df.spark.storage_level)
... print(cached_df.count())
...
Disk Memory Serialized 1x Replicated
dogs 4
cats 4
dtype: int64
>>> df = df.spark.persist()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use `unpersist` function
>>> df.spark.unpersist()
"""
from pyspark.pandas.frame import CachedDataFrame
self._psdf._update_internal_frame(
self._psdf._internal.resolved_copy, requires_same_anchor=False
)
return CachedDataFrame(self._psdf._internal, storage_level=storage_level)
def hint(self, name: str, *parameters: "PrimitiveType") -> "ps.DataFrame":
"""
Specifies some hint on the current DataFrame.
Parameters
----------
name : A name of the hint.
parameters : Optional parameters.
Returns
-------
ret : DataFrame with the hint.
See Also
--------
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Examples
--------
>>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value']).set_index('lkey')
>>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value']).set_index('rkey')
>>> merged = df1.merge(df2.spark.hint("broadcast"), left_index=True, right_index=True)
>>> merged.spark.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
...BroadcastHashJoin...
...
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
return DataFrame(internal.with_new_sdf(internal.spark_frame.hint(name, *parameters)))
def to_table(
self,
name: str,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: "OptionalPrimitiveType",
) -> None:
"""
Write the DataFrame into a Spark table. :meth:`DataFrame.spark.to_table`
is an alias of :meth:`DataFrame.to_table`.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
'overwrite'. Specifies the behavior of the save operation when the table exists
already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options
Additional options passed directly to Spark.
Returns
-------
None
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.spark.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self._psdf.spark.frame(index_col=index_col).write.saveAsTable(
name=name, format=format, mode=mode, partitionBy=partition_cols, **options
)
def to_spark_io(
self,
path: Optional[str] = None,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: "OptionalPrimitiveType",
) -> None:
"""Write the DataFrame out to a Spark data source. :meth:`DataFrame.spark.to_spark_io`
is an alias of :meth:`DataFrame.to_spark_io`.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
            'overwrite'. Specifies the behavior of the save operation when data already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
None
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
DataFrame.spark.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self._psdf.spark.frame(index_col=index_col).write.save(
path=path, format=format, mode=mode, partitionBy=partition_cols, **options
)
def explain(self, extended: Optional[bool] = None, mode: Optional[str] = None) -> None:
"""
Prints the underlying (logical and physical) Spark plans to the console for debugging
purpose.
Parameters
----------
extended : boolean, default ``False``.
If ``False``, prints only the physical plan.
mode : string, default ``None``.
The expected output format of plans.
Returns
-------
None
Examples
--------
>>> df = ps.DataFrame({'id': range(10)})
>>> df.spark.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
>>> df.spark.explain(True) # doctest: +ELLIPSIS
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.spark.explain("extended") # doctest: +ELLIPSIS
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.spark.explain(mode="extended") # doctest: +ELLIPSIS
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
self._psdf._internal.to_internal_spark_frame.explain(extended, mode)
def apply(
self,
func: Callable[[SparkDataFrame], SparkDataFrame],
index_col: Optional[Union[str, List[str]]] = None,
) -> "ps.DataFrame":
"""
        Applies a function that takes and returns a Spark DataFrame. It allows to natively
        apply a Spark function and column APIs with the Spark DataFrame internally used
        in this DataFrame.
        .. note:: set `index_col` and keep the column so named in the output Spark
            DataFrame to avoid using the default index and the performance penalty
            that comes with it. If you omit `index_col`, the default index is used,
            which is potentially expensive in general.
.. note:: it will lose column labels. This is a synonym of
``func(psdf.to_spark(index_col)).to_pandas_on_spark(index_col)``.
Parameters
----------
func : function
Function to apply the function against the data by using Spark DataFrame.
Returns
-------
DataFrame
Raises
------
ValueError : If the output from the function is not a Spark DataFrame.
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.spark.apply(
... lambda sdf: sdf.selectExpr("a + b as c", "index"), index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
c
index
0 5
1 7
2 9
The case below ends up with using the default index, which should be avoided
if possible.
>>> psdf.spark.apply(lambda sdf: sdf.groupby("a").count().sort("a"))
a count
0 1 1
1 2 1
2 3 1
"""
output = func(self.frame(index_col))
if not isinstance(output, SparkDataFrame):
raise ValueError(
"The output of the function [%s] should be of a "
"pyspark.sql.DataFrame; however, got [%s]." % (func, type(output))
)
psdf = output.to_pandas_on_spark(index_col) # type: ignore
return cast("ps.DataFrame", psdf)
def repartition(self, num_partitions: int) -> "ps.DataFrame":
"""
Returns a new DataFrame partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
Parameters
----------
num_partitions : int
The target number of partitions.
Returns
-------
DataFrame
Examples
--------
>>> psdf = ps.DataFrame({"age": [5, 5, 2, 2],
... "name": ["Bob", "Bob", "Alice", "Alice"]}).set_index("age")
>>> psdf.sort_index() # doctest: +NORMALIZE_WHITESPACE
name
age
2 Alice
2 Alice
5 Bob
5 Bob
>>> new_psdf = psdf.spark.repartition(7)
>>> new_psdf.to_spark().rdd.getNumPartitions()
7
>>> new_psdf.sort_index() # doctest: +NORMALIZE_WHITESPACE
name
age
2 Alice
2 Alice
5 Bob
5 Bob
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
repartitioned_sdf = internal.spark_frame.repartition(num_partitions)
return DataFrame(internal.with_new_sdf(repartitioned_sdf))
def coalesce(self, num_partitions: int) -> "ps.DataFrame":
"""
Returns a new DataFrame that has exactly `num_partitions` partitions.
.. note:: This operation results in a narrow dependency, e.g. if you go from 1000
partitions to 100 partitions, there will not be a shuffle, instead each of the 100 new
partitions will claim 10 of the current partitions. If a larger number of partitions is
requested, it will stay at the current number of partitions. However, if you're doing a
drastic coalesce, e.g. to num_partitions = 1, this may result in your computation taking
place on fewer nodes than you like (e.g. one node in the case of num_partitions = 1). To
avoid this, you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever the current
partitioning is).
Parameters
----------
num_partitions : int
The target number of partitions.
Returns
-------
DataFrame
Examples
--------
>>> psdf = ps.DataFrame({"age": [5, 5, 2, 2],
... "name": ["Bob", "Bob", "Alice", "Alice"]}).set_index("age")
>>> psdf.sort_index() # doctest: +NORMALIZE_WHITESPACE
name
age
2 Alice
2 Alice
5 Bob
5 Bob
>>> new_psdf = psdf.spark.coalesce(1)
>>> new_psdf.to_spark().rdd.getNumPartitions()
1
>>> new_psdf.sort_index() # doctest: +NORMALIZE_WHITESPACE
name
age
2 Alice
2 Alice
5 Bob
5 Bob
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
coalesced_sdf = internal.spark_frame.coalesce(num_partitions)
return DataFrame(internal.with_new_sdf(coalesced_sdf))
def checkpoint(self, eager: bool = True) -> "ps.DataFrame":
"""Returns a checkpointed version of this DataFrame.
Checkpointing can be used to truncate the logical plan of this DataFrame, which is
especially useful in iterative algorithms where the plan may grow exponentially. It will be
saved to files inside the checkpoint directory set with `SparkContext.setCheckpointDir`.
Parameters
----------
eager : bool
Whether to checkpoint this DataFrame immediately
Returns
-------
DataFrame
Examples
--------
>>> psdf = ps.DataFrame({"a": ["a", "b", "c"]})
>>> psdf
a
0 a
1 b
2 c
>>> new_psdf = psdf.spark.checkpoint() # doctest: +SKIP
>>> new_psdf # doctest: +SKIP
a
0 a
1 b
2 c
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
checkpointed_sdf = internal.spark_frame.checkpoint(eager)
return DataFrame(internal.with_new_sdf(checkpointed_sdf))
def local_checkpoint(self, eager: bool = True) -> "ps.DataFrame":
"""Returns a locally checkpointed version of this DataFrame.
Checkpointing can be used to truncate the logical plan of this DataFrame, which is
especially useful in iterative algorithms where the plan may grow exponentially. Local
checkpoints are stored in the executors using the caching subsystem and therefore they are
not reliable.
Parameters
----------
eager : bool
Whether to locally checkpoint this DataFrame immediately
Returns
-------
DataFrame
Examples
--------
>>> psdf = ps.DataFrame({"a": ["a", "b", "c"]})
>>> psdf
a
0 a
1 b
2 c
>>> new_psdf = psdf.spark.local_checkpoint()
>>> new_psdf
a
0 a
1 b
2 c
"""
from pyspark.pandas.frame import DataFrame
internal = self._psdf._internal.resolved_copy
checkpointed_sdf = internal.spark_frame.localCheckpoint(eager)
return DataFrame(internal.with_new_sdf(checkpointed_sdf))
@property
def analyzed(self) -> "ps.DataFrame":
"""
Returns a new DataFrame with the analyzed Spark DataFrame.
After multiple operations, the underlying Spark plan could grow huge
and make the Spark planner take a long time to finish the planning.
This function is for the workaround to avoid it.
.. note:: After analyzed, operations between the analyzed DataFrame and the original one
will **NOT** work without setting a config `compute.ops_on_diff_frames` to `True`.
Returns
-------
DataFrame
Examples
--------
>>> df = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, columns=["a", "b"])
>>> df
a b
0 1 4
1 2 5
2 3 6
The analyzed one should return the same value.
>>> df.spark.analyzed
a b
0 1 4
1 2 5
2 3 6
However, it won't work with the same anchor Series.
>>> df + df.spark.analyzed
Traceback (most recent call last):
...
ValueError: ... enable 'compute.ops_on_diff_frames' option.
>>> with ps.option_context('compute.ops_on_diff_frames', True):
... (df + df.spark.analyzed).sort_index()
a b
0 2 8
1 4 10
2 6 12
"""
from pyspark.pandas.frame import DataFrame
return DataFrame(self._psdf._internal.resolved_copy)
class CachedSparkFrameMethods(SparkFrameMethods):
"""Spark related features for cached DataFrame. This is usually created via
`df.spark.cache()`."""
def __init__(self, frame: "CachedDataFrame"):
super().__init__(frame)
@property
def storage_level(self) -> StorageLevel:
"""
Return the storage level of this cache.
Examples
--------
>>> import pyspark.pandas as ps
>>> import pyspark
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.spark.cache() as cached_df:
... print(cached_df.spark.storage_level)
...
Disk Memory Deserialized 1x Replicated
Set the StorageLevel to `MEMORY_ONLY`.
>>> with df.spark.persist(pyspark.StorageLevel.MEMORY_ONLY) as cached_df:
... print(cached_df.spark.storage_level)
...
Memory Serialized 1x Replicated
"""
return self._psdf._cached.storageLevel
def unpersist(self) -> None:
"""
The `unpersist` function is used to uncache the pandas-on-Spark DataFrame when it
is not used with `with` statement.
Returns
-------
None
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.spark.cache()
To uncache the dataframe, use `unpersist` function
>>> df.spark.unpersist()
"""
if self._psdf._cached.is_cached:
self._psdf._cached.unpersist()
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
import uuid
import numpy
import pandas
from pyspark.sql import SparkSession
import pyspark.pandas.spark.accessors
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.spark.accessors.__dict__.copy()
globs["np"] = numpy
globs["pd"] = pandas
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.spark.accessors tests")
.getOrCreate()
)
db_name = "db%s" % str(uuid.uuid4()).replace("-", "")
spark.sql("CREATE DATABASE %s" % db_name)
globs["db"] = db_name
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.spark.accessors,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
kgullikson88/Chiron-Scripts | MeasureRV.py | 1 | 2575 | """
Measure the radial velocity and vsini of flattened spectra
"""
from __future__ import print_function, division, absolute_import
import logging
import os
import glob
import matplotlib.pyplot as plt
from astropy.io import fits
import pandas as pd
import seaborn as sns
import HelperFunctions
import Fitters
# Set up plotting
sns.set_style('white')
sns.set_style('ticks')
sns.set_context('paper', font_scale=1.5)
# Set up logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Get the HDF5 filename. Might want to change this eventually.
#HDF5_FILENAME = '/Volumes/DATADRIVE/Kurucz_Grid/TS23_grid_full.hdf5'
HDF5_FILENAME = '/Users/kevingullikson/StellarLibrary/Kurucz_Grid/CHIRON_grid_air.hdf5'
PAR_LOGFILE = 'Flatten.log'
def fit(filename, model_library, teff, logg, feh=0.0, output_basename='RVFitter'):
# Read in the (assumed flattened) spectra
all_orders = HelperFunctions.ReadExtensionFits(filename)
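    # Drop any order lying entirely inside the 475-495 region; judging by the
    # 'nobalmer' output name, this presumably avoids the H-beta line.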
orders = [o.copy() for o in all_orders if o.x[0] < 475 or o.x[-1] > 495]
# Set up the fitter
fitter = Fitters.RVFitter(orders, model_library=model_library,
T=teff, logg=logg, feh=feh)
header = fits.getheader(filename)
starname = header['OBJECT']
date = header['DATE-OBS'].split('T')[0]
stardata_str = '{}_{}-'.format(starname.replace(' ', ''), date.replace('-', ''))
basename = os.path.join(output_basename, stardata_str)
# Fit
fitter.fit(backend='multinest', n_live_points=1000, basename=basename, overwrite=False, init_MPI=False)
# Make a triangle plot and save it
fitter.triangle()
plt.savefig('{}triangle.pdf'.format(basename))
return fitter
if __name__ == '__main__':
file_list = glob.glob('201*/*renormalized.fits')
fitted_df = pd.read_csv(PAR_LOGFILE, header=None, names=['fname', 'star', 'date', 'teff', 'logg', 'rv'])
print(fitted_df.tail())
for filename in file_list:
logging.info('Fitting RV for {}'.format(filename))
# Find this filename in the fitted dataframe (generated while flattening the spectra)
header = fits.getheader(filename)
starname = header['OBJECT']
date = header['DATE-OBS']
print(starname, date)
subset = fitted_df.loc[(fitted_df.star==starname) & (fitted_df.date==date)]
print(subset)
teff = float(subset.teff)
logg = float(subset.logg)
logging.info('Teff = {}\nlogg = {}'.format(teff, logg))
fitter = fit(filename, HDF5_FILENAME, teff=teff, logg=logg, output_basename='RVFitter_nobalmer_veiling')
| gpl-3.0 |
live-clones/dolfin-adjoint | tests_dolfin/ode_solver/ode_solver.py | 1 | 3779 | from __future__ import print_function
try:
from dolfin import BackwardEuler
except ImportError:
from dolfin import info_red
info_red("Need dolfin > 1.2.0 for ode_solver test.")
import sys; sys.exit(0)
from dolfin import *
from dolfin_adjoint import *
import ufl.algorithms
if not hasattr(MultiStageScheme, "to_tlm"):
info_red("Need dolfin > 1.2.0 for ode_solver test.")
import sys; sys.exit(0)
mesh = UnitIntervalMesh(1)
#R = FunctionSpace(mesh, "R", 0) # in my opinion, should work, but doesn't
R = FunctionSpace(mesh, "CG", 1)
def main(u, form, time, Solver, dt):
scheme = Solver(form, u, time)
scheme.t().assign(float(time))
xs = [float(time)]
ys = [u.vector().array()[0]]
solver = PointIntegralSolver(scheme)
solver.parameters.reset_stage_solutions = True
solver.parameters.newton_solver.reset_each_step = True
for i in range(int(0.2/dt)):
solver.step(dt)
xs.append(float(time))
ys.append(u.vector().array()[0])
return (u, xs, ys)
if __name__ == "__main__":
u0 = interpolate(Constant(1.0), R, name="InitialValue")
c_f = 1.0
c = interpolate(Constant(1.0), R, name="GrowthRate")
Solver = RK4
u = u0.copy(deepcopy=True, name="Solution")
v = TestFunction(R)
time = Constant(0.0)
# FIXME: make this work in the forward code:
#expr = Expression("t", t=time)
#form = inner(expr(u, v)*dP
form = lambda u, time: inner(time*u, v)*dP
exact_u = lambda t: exp(t*t/2.0)
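    # i.e. the scalar ODE du/dt = t*u with u(0) = 1, whose exact solution is
    # u(t) = exp(t^2/2); the commented alternative below is du/dt = u.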
#form = lambda u, time: inner(u, v)*dP
#exact_u = lambda t: exp(t)
## Step 0. Check forward order-of-convergence (nothing to do with adjoints)
check = False
plot = False
if check:
if plot:
import matplotlib.pyplot as plt
dts = [0.1, 0.05, 0.025]
errors = []
for dt in dts:
u.assign(u0)
time.assign(0.0)
adj_reset()
(u, xs, ys) = main(u, form(u, time), time, Solver, dt=dt)
exact_ys = [exact_u(t) for t in xs]
errors.append(abs(ys[-1] - exact_ys[-1]))
if plot:
plt.plot(xs, ys, label="Approximate solution (dt %s)" % dt)
if dt == dts[-1]:
plt.plot(xs, exact_ys, label="Exact solution")
print("Errors: ", errors)
print("Convergence order: ", convergence_order(errors))
assert min(convergence_order(errors)) > 0.8
if plot:
plt.legend(loc="best")
plt.show()
else:
dt = 0.1
(u, xs, ys) = main(u, form(u, time), time, Solver, dt=dt)
print("Solution: ", ys[-1])
## Step 1. Check replay correctness
replay = True
if replay:
assert adjglobals.adjointer.equation_count > 0
adj_html("forward.html", "forward")
success = replay_dolfin(tol=1.0e-15, stop=True)
assert success
## Step 2. Check TLM correctness
dtm = TimeMeasure()
J = Functional(inner(u, u)*dx*dtm[FINISH_TIME])
m = Control(u)
assert m.data().vector()[0] == u0.vector()[0]
Jm = assemble(inner(u, u)*dx)
def Jhat(ic):
time = Constant(0.0)
(u, xs, ys) = main(ic, form(ic, time), time, Solver, dt=dt)
print("Perturbed functional value: ", assemble(inner(u, u)*dx))
return assemble(inner(u, u)*dx)
dJdm = compute_gradient_tlm(J, m, forget=False)
minconv_tlm = taylor_test(Jhat, m, Jm, dJdm, perturbation_direction=interpolate(Constant(1.0), R), seed=1.0)
assert minconv_tlm > 1.8
## Step 3. Check ADM correctness
dJdm = compute_gradient(J, m, forget=False)
minconv_adm = taylor_test(Jhat, m, Jm, dJdm, perturbation_direction=interpolate(Constant(1.0), R), seed=1.0)
assert minconv_adm > 1.8
| lgpl-3.0 |
CallaJun/hackprince | indico/matplotlib/figure.py | 10 | 58719 | """
The figure module provides the top-level
:class:`~matplotlib.artist.Artist`, the :class:`Figure`, which
contains all the plot elements. The following classes are defined
:class:`SubplotParams`
control the default spacing of the subplots
:class:`Figure`
top level container for all plot elements
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
from operator import itemgetter
import numpy as np
from matplotlib import rcParams
from matplotlib import docstring
from matplotlib import __version__ as _mpl_version
import matplotlib.artist as martist
from matplotlib.artist import Artist, allow_rasterization
import matplotlib.cbook as cbook
from matplotlib.cbook import Stack, iterable
from matplotlib import _image
from matplotlib.image import FigureImage
import matplotlib.colorbar as cbar
from matplotlib.axes import Axes, SubplotBase, subplot_class_factory
from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput
from matplotlib.legend import Legend
from matplotlib.patches import Rectangle
from matplotlib.projections import (get_projection_names,
process_projection_requirements)
from matplotlib.text import Text, _process_text_args
from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
TransformedBbox)
from matplotlib.backend_bases import NonGuiException
docstring.interpd.update(projection_names=get_projection_names())
class AxesStack(Stack):
"""
Specialization of the Stack to handle all tracking of Axes in a Figure.
This stack stores ``key, (ind, axes)`` pairs, where:
* **key** should be a hash of the args and kwargs
used in generating the Axes.
* **ind** is a serial number for tracking the order
in which axes were added.
The AxesStack is a callable, where ``ax_stack()`` returns
the current axes. Alternatively the :meth:`current_key_axes` will
return the current key and associated axes.
"""
def __init__(self):
Stack.__init__(self)
self._ind = 0
def as_list(self):
"""
Return a list of the Axes instances that have been added to the figure
"""
ia_list = [a for k, a in self._elements]
ia_list.sort()
return [a for i, a in ia_list]
def get(self, key):
"""
Return the Axes instance that was added with *key*.
If it is not present, return None.
"""
item = dict(self._elements).get(key)
if item is None:
return None
return item[1]
def _entry_from_axes(self, e):
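        # Invert the stored (key, (ind, axes)) entries to recover the key and
        # serial number for the given axes, then rebuild its stack entry.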
ind, k = dict([(a, (ind, k)) for (k, (ind, a)) in self._elements])[e]
return (k, (ind, e))
def remove(self, a):
"""Remove the axes from the stack."""
Stack.remove(self, self._entry_from_axes(a))
def bubble(self, a):
"""
Move the given axes, which must already exist in the
stack, to the top.
"""
return Stack.bubble(self, self._entry_from_axes(a))
def add(self, key, a):
"""
Add Axes *a*, with key *key*, to the stack, and return the stack.
If *a* is already on the stack, don't add it again, but
return *None*.
"""
# All the error checking may be unnecessary; but this method
# is called so seldom that the overhead is negligible.
if not isinstance(a, Axes):
raise ValueError("second argument, %s, is not an Axes" % a)
try:
hash(key)
except TypeError:
raise ValueError("first argument, %s, is not a valid key" % key)
a_existing = self.get(key)
if a_existing is not None:
Stack.remove(self, (key, a_existing))
warnings.warn(
"key %s already existed; Axes is being replaced" % key)
# I don't think the above should ever happen.
if a in self:
return None
self._ind += 1
return Stack.push(self, (key, (self._ind, a)))
def current_key_axes(self):
"""
Return a tuple of ``(key, axes)`` for the active axes.
If no axes exists on the stack, then returns ``(None, None)``.
"""
if not len(self._elements):
return self._default, self._default
else:
key, (index, axes) = self._elements[self._pos]
return key, axes
def __call__(self):
return self.current_key_axes()[1]
def __contains__(self, a):
return a in self.as_list()
class SubplotParams:
"""
A class to hold the parameters for a subplot
"""
def __init__(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
All dimensions are fraction of the figure width or height.
All values default to their rc params
The following attributes are available
*left* : 0.125
The left side of the subplots of the figure
*right* : 0.9
The right side of the subplots of the figure
*bottom* : 0.1
The bottom of the subplots of the figure
*top* : 0.9
The top of the subplots of the figure
*wspace* : 0.2
The amount of width reserved for blank space between subplots
*hspace* : 0.2
The amount of height reserved for white space between subplots
"""
self.validate = True
self.update(left, bottom, right, top, wspace, hspace)
def update(self, left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None):
"""
Update the current values. If any kwarg is None, default to
the current value, if set, otherwise to rc
"""
thisleft = getattr(self, 'left', None)
thisright = getattr(self, 'right', None)
thistop = getattr(self, 'top', None)
thisbottom = getattr(self, 'bottom', None)
thiswspace = getattr(self, 'wspace', None)
thishspace = getattr(self, 'hspace', None)
self._update_this('left', left)
self._update_this('right', right)
self._update_this('bottom', bottom)
self._update_this('top', top)
self._update_this('wspace', wspace)
self._update_this('hspace', hspace)
def reset():
self.left = thisleft
self.right = thisright
self.top = thistop
self.bottom = thisbottom
self.wspace = thiswspace
self.hspace = thishspace
if self.validate:
if self.left >= self.right:
reset()
raise ValueError('left cannot be >= right')
if self.bottom >= self.top:
reset()
raise ValueError('bottom cannot be >= top')
def _update_this(self, s, val):
if val is None:
val = getattr(self, s, None)
if val is None:
key = 'figure.subplot.' + s
val = rcParams[key]
setattr(self, s, val)
class Figure(Artist):
"""
The Figure instance supports callbacks through a *callbacks*
attribute which is a :class:`matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'dpi_changed', and
the callback will be called with ``func(fig)`` where fig is the
:class:`Figure` instance.
*patch*
The figure patch is drawn by a
:class:`matplotlib.patches.Rectangle` instance
*suppressComposite*
For multiple figure images, the figure will make composite
images depending on the renderer option_image_nocomposite
function. If suppressComposite is True|False, this will
override the renderer.
"""
def __str__(self):
return "Figure(%gx%g)" % tuple(self.bbox.size)
def __init__(self,
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
linewidth=0.0, # the default linewidth of the frame
frameon=None, # whether or not to draw the figure frame
subplotpars=None, # default to rc
tight_layout=None, # default to rc figure.autolayout
):
"""
*figsize*
w,h tuple in inches
*dpi*
Dots per inch
*facecolor*
The figure patch facecolor; defaults to rc ``figure.facecolor``
*edgecolor*
The figure patch edge color; defaults to rc ``figure.edgecolor``
*linewidth*
The figure patch edge linewidth; the default linewidth of the frame
*frameon*
If *False*, suppress drawing the figure frame
*subplotpars*
A :class:`SubplotParams` instance, defaults to rc
*tight_layout*
If *False* use *subplotpars*; if *True* adjust subplot
parameters using :meth:`tight_layout` with default padding.
When providing a dict containing the keys `pad`, `w_pad`, `h_pad`
and `rect`, the default :meth:`tight_layout` paddings will be
overridden.
Defaults to rc ``figure.autolayout``.
"""
Artist.__init__(self)
self.callbacks = cbook.CallbackRegistry()
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
if frameon is None:
frameon = rcParams['figure.frameon']
self.dpi_scale_trans = Affine2D()
self.dpi = dpi
self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
self.frameon = frameon
self.transFigure = BboxTransformTo(self.bbox)
# the figurePatch name is deprecated
self.patch = self.figurePatch = Rectangle(
xy=(0, 0), width=1, height=1,
facecolor=facecolor, edgecolor=edgecolor,
linewidth=linewidth)
self._set_artist_props(self.patch)
self.patch.set_aa(False)
self._hold = rcParams['axes.hold']
self.canvas = None
self._suptitle = None
if subplotpars is None:
subplotpars = SubplotParams()
self.subplotpars = subplotpars
self.set_tight_layout(tight_layout)
self._axstack = AxesStack() # track all figure axes and current axes
self.clf()
self._cachedRenderer = None
# TODO: I'd like to dynamically add the _repr_html_ method
# to the figure in the right context, but then IPython doesn't
# use it, for some reason.
def _repr_html_(self):
# We can't use "isinstance" here, because then we'd end up importing
        # webagg unconditionally.
if (self.canvas is not None and
'WebAgg' in self.canvas.__class__.__name__):
from matplotlib.backends import backend_webagg
return backend_webagg.ipython_inline_display(self)
def show(self, warn=True):
"""
If using a GUI backend with pyplot, display the figure window.
If the figure was not created using
:func:`~matplotlib.pyplot.figure`, it will lack a
:class:`~matplotlib.backend_bases.FigureManagerBase`, and
will raise an AttributeError.
For non-GUI backends, this does nothing, in which case
a warning will be issued if *warn* is True (default).
"""
try:
manager = getattr(self.canvas, 'manager')
except AttributeError as err:
raise AttributeError("%s\n"
"Figure.show works only "
"for figures managed by pyplot, normally "
"created by pyplot.figure()." % err)
if manager is not None:
try:
manager.show()
return
except NonGuiException:
pass
if warn:
import warnings
warnings.warn(
"matplotlib is currently using a non-GUI backend, "
"so cannot show the figure")
def _get_axes(self):
return self._axstack.as_list()
axes = property(fget=_get_axes, doc="Read-only: list of axes in Figure")
def _get_dpi(self):
return self._dpi
def _set_dpi(self, dpi):
self._dpi = dpi
self.dpi_scale_trans.clear().scale(dpi, dpi)
self.callbacks.process('dpi_changed', self)
dpi = property(_get_dpi, _set_dpi)
def get_tight_layout(self):
"""
Return the Boolean flag, True to use :meth:`tight_layout` when drawing.
"""
return self._tight
def set_tight_layout(self, tight):
"""
Set whether :meth:`tight_layout` is used upon drawing.
If None, the rcParams['figure.autolayout'] value will be set.
When providing a dict containing the keys `pad`, `w_pad`, `h_pad`
and `rect`, the default :meth:`tight_layout` paddings will be
overridden.
ACCEPTS: [True | False | dict | None ]
"""
if tight is None:
tight = rcParams['figure.autolayout']
self._tight = bool(tight)
self._tight_parameters = tight if isinstance(tight, dict) else {}
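# Typical usage (illustrative sketch; assumes an existing Figure ``fig``):
#
#     fig.set_tight_layout(True)                                       # enable with defaults
#     fig.set_tight_layout({'pad': 0.4, 'w_pad': 0.5, 'h_pad': 1.0})   # custom padding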
def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right'):
"""
Date ticklabels often overlap, so it is useful to rotate them
and right align them. Also, a common use case is a number of
subplots with shared xaxes where the x-axis is date data. The
ticklabels are often long, and it helps to rotate them on the
bottom subplot and turn them off on other subplots, as well as
turn off xlabels.
*bottom*
The bottom of the subplots for :meth:`subplots_adjust`
*rotation*
The rotation of the xtick labels
*ha*
The horizontal alignment of the xticklabels
"""
allsubplots = np.alltrue([hasattr(ax, 'is_last_row') for ax
in self.axes])
if len(self.axes) == 1:
for label in self.axes[0].get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
if allsubplots:
for ax in self.get_axes():
if ax.is_last_row():
for label in ax.get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.set_xlabel('')
if allsubplots:
self.subplots_adjust(bottom=bottom)
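# Typical usage (illustrative sketch; ``dates`` and ``values`` are placeholder
# sequences, not defined in this module):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot(dates, values)
#     fig.autofmt_xdate(bottom=0.25, rotation=45, ha='right')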
def get_children(self):
'get a list of artists contained in the figure'
children = [self.patch]
children.extend(self.artists)
children.extend(self.axes)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.images)
children.extend(self.legends)
return children
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the figure.
Returns True,{}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# inside = mouseevent.x >= 0 and mouseevent.y >= 0
inside = self.bbox.contains(mouseevent.x, mouseevent.y)
return inside, {}
def get_window_extent(self, *args, **kwargs):
'get the figure bounding box in display space; kwargs are void'
return self.bbox
def suptitle(self, t, **kwargs):
"""
Add a centered title to the figure.
kwargs are :class:`matplotlib.text.Text` properties. Using figure
coordinates, the defaults are:
*x* : 0.5
The x location of the text in figure coords
*y* : 0.98
The y location of the text in figure coords
*horizontalalignment* : 'center'
The horizontal alignment of the text
*verticalalignment* : 'top'
The vertical alignment of the text
A :class:`matplotlib.text.Text` instance is returned.
Example::
fig.suptitle('this is the figure title', fontsize=12)
"""
x = kwargs.pop('x', 0.5)
y = kwargs.pop('y', 0.98)
if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):
kwargs['horizontalalignment'] = 'center'
if ('verticalalignment' not in kwargs) and ('va' not in kwargs):
kwargs['verticalalignment'] = 'top'
sup = self.text(x, y, t, **kwargs)
if self._suptitle is not None:
self._suptitle.set_text(t)
self._suptitle.set_position((x, y))
self._suptitle.update_from(sup)
sup.remove()
else:
self._suptitle = sup
return self._suptitle
def set_canvas(self, canvas):
"""
Set the canvas that contains the figure
ACCEPTS: a FigureCanvas instance
"""
self.canvas = canvas
def hold(self, b=None):
"""
Set the hold state. If hold is None (default), toggle the
hold state. Else set the hold state to boolean value b.
e.g.::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def figimage(self, X,
xo=0,
yo=0,
alpha=None,
norm=None,
cmap=None,
vmin=None,
vmax=None,
origin=None,
**kwargs):
"""
Adds a non-resampled image to the figure.
call signatures::
figimage(X, **kwargs)
adds a non-resampled array *X* to the figure.
::
figimage(X, xo, yo)
with pixel offsets *xo*, *yo*,
*X* must be a float array:
* If *X* is MxN, assume luminance (grayscale)
* If *X* is MxNx3, assume RGB
* If *X* is MxNx4, assume RGBA
Optional keyword arguments:
========= =========================================================
Keyword Description
========= =========================================================
xo or yo An integer, the *x* and *y* image offset in pixels
cmap a :class:`matplotlib.colors.Colormap` instance, e.g.,
cm.jet. If *None*, default to the rc ``image.cmap``
value
norm a :class:`matplotlib.colors.Normalize` instance. The
default is normalization(). This scales luminance -> 0-1
vmin|vmax are used to scale a luminance image to 0-1. If either
is *None*, the min and max of the luminance values will
be used. Note if you pass a norm instance, the settings
for *vmin* and *vmax* will be ignored.
alpha the alpha blending value, default is *None*
origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of
the array is in the upper left or lower left corner of
the axes. Defaults to the rc image.origin value
========= =========================================================
figimage complements the axes image
(:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
to fit the current axes. If you want a resampled image to
fill the entire figure, you can define an
:class:`~matplotlib.axes.Axes` with size [0,1,0,1].
An :class:`matplotlib.image.FigureImage` instance is returned.
.. plot:: mpl_examples/pylab_examples/figimage_demo.py
Additional kwargs are Artist kwargs passed on to
:class:`~matplotlib.image.FigureImage`
"""
if not self._hold:
self.clf()
im = FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)
im.set_array(X)
im.set_alpha(alpha)
if norm is None:
im.set_clim(vmin, vmax)
self.images.append(im)
im._remove_method = lambda h: self.images.remove(h)
return im
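# Typical usage (illustrative sketch; ``data`` is a placeholder array):
# place a non-resampled luminance array at a pixel offset.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     data = np.random.random((100, 100))
#     fig.figimage(data, xo=10, yo=10, origin='lower')
#     plt.show()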
def set_size_inches(self, *args, **kwargs):
"""
set_size_inches(w,h, forward=False)
Set the figure size in inches (1in == 2.54cm)
Usage::
fig.set_size_inches(w,h) # OR
fig.set_size_inches((w,h) )
optional kwarg *forward=True* will cause the canvas size to be
automatically updated; e.g., you can resize the figure window
from the shell
ACCEPTS: a w,h tuple with w,h in inches
See Also
--------
matplotlib.Figure.get_size_inches
"""
forward = kwargs.get('forward', False)
if len(args) == 1:
w, h = args[0]
else:
w, h = args
dpival = self.dpi
self.bbox_inches.p1 = w, h
if forward:
dpival = self.dpi
canvasw = w * dpival
canvash = h * dpival
manager = getattr(self.canvas, 'manager', None)
if manager is not None:
manager.resize(int(canvasw), int(canvash))
def get_size_inches(self):
"""
Returns the current size of the figure in inches (1in == 2.54cm)
as a numpy array.
Returns
-------
size : ndarray
The size of the figure in inches
See Also
--------
matplotlib.Figure.set_size_inches
"""
return np.array(self.bbox_inches.p1)
def get_edgecolor(self):
'Get the edge color of the Figure rectangle'
return self.patch.get_edgecolor()
def get_facecolor(self):
'Get the face color of the Figure rectangle'
return self.patch.get_facecolor()
def get_figwidth(self):
'Return the figwidth as a float'
return self.bbox_inches.width
def get_figheight(self):
'Return the figheight as a float'
return self.bbox_inches.height
def get_dpi(self):
'Return the dpi as a float'
return self.dpi
def get_frameon(self):
'get the boolean indicating frameon'
return self.frameon
def set_edgecolor(self, color):
"""
Set the edge color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the face color of the Figure rectangle
ACCEPTS: any matplotlib color - see help(colors)
"""
self.patch.set_facecolor(color)
def set_dpi(self, val):
"""
Set the dots-per-inch of the figure
ACCEPTS: float
"""
self.dpi = val
def set_figwidth(self, val):
"""
Set the width of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.x1 = val
def set_figheight(self, val):
"""
Set the height of the figure in inches
ACCEPTS: float
"""
self.bbox_inches.y1 = val
def set_frameon(self, b):
"""
Set whether the figure frame (background) is displayed or invisible
ACCEPTS: boolean
"""
self.frameon = b
def delaxes(self, a):
'remove a from the figure and update the current axes'
self._axstack.remove(a)
for func in self._axobservers:
func(self)
def _make_key(self, *args, **kwargs):
'make a hashable key out of args and kwargs'
def fixitems(items):
#items may have arrays and lists in them, so convert them
# to tuples for the key
ret = []
for k, v in items:
# some objects can define __getitem__ without being
# iterable and in those cases the conversion to tuples
# will fail. So instead of using the iterable(v) function
# we simply try to convert it to a tuple, and proceed if that fails.
try:
v = tuple(v)
except Exception:
pass
ret.append((k, v))
return tuple(ret)
def fixlist(args):
ret = []
for a in args:
if iterable(a):
a = tuple(a)
ret.append(a)
return tuple(ret)
key = fixlist(args), fixitems(six.iteritems(kwargs))
return key
@docstring.dedent_interpd
def add_axes(self, *args, **kwargs):
"""
Add an axes at position *rect* [*left*, *bottom*, *width*,
*height*] where all quantities are in fractions of figure
width and height. kwargs are legal
:class:`~matplotlib.axes.Axes` kwargs plus *projection* which
sets the projection type of the axes. (For backward
compatibility, ``polar=True`` may also be provided, which is
equivalent to ``projection='polar'``). Valid values for
*projection* are: %(projection_names)s. Some of these
projections support additional kwargs, which may be provided
to :meth:`add_axes`. Typical usage::
rect = l,b,w,h
fig.add_axes(rect)
fig.add_axes(rect, frameon=False, axisbg='g')
fig.add_axes(rect, polar=True)
fig.add_axes(rect, projection='polar')
fig.add_axes(ax)
If the figure already has an axes with the same parameters,
then it will simply make that axes current and return it. If
you do not want this behavior, e.g., you want to force the
creation of a new Axes, you must use a unique set of args and
kwargs. The axes :attr:`~matplotlib.axes.Axes.label`
attribute has been exposed for this purpose. e.g., if you want
two axes that are otherwise identical to be added to the
figure, make sure you give them unique labels::
fig.add_axes(rect, label='axes1')
fig.add_axes(rect, label='axes2')
In rare circumstances, add_axes may be called with a single
argument, an Axes instance already created in the present
figure but not in the figure's list of axes. For example,
if an axes has been removed with :meth:`delaxes`, it can
be restored with::
fig.add_axes(ax)
In all cases, the :class:`~matplotlib.axes.Axes` instance
will be returned.
In addition to *projection*, the following kwargs are supported:
%(Axes)s
"""
if not len(args):
return
# shortcut the projection "key" modifications later on, if an axes
# with the exact args/kwargs exists, return it immediately.
key = self._make_key(*args, **kwargs)
ax = self._axstack.get(key)
if ax is not None:
self.sca(ax)
return ax
if isinstance(args[0], Axes):
a = args[0]
assert(a.get_figure() is self)
else:
rect = args[0]
projection_class, kwargs, key = process_projection_requirements(
self, *args, **kwargs)
# check that an axes of this type doesn't already exist, if it
# does, set it as active and return it
ax = self._axstack.get(key)
if ax is not None and isinstance(ax, projection_class):
self.sca(ax)
return ax
# create the new axes using the axes class given
a = projection_class(self, rect, **kwargs)
self._axstack.add(key, a)
self.sca(a)
return a
@docstring.dedent_interpd
def add_subplot(self, *args, **kwargs):
"""
Add a subplot. Examples::
fig.add_subplot(111)
# equivalent but more general
fig.add_subplot(1,1,1)
# add subplot with red background
fig.add_subplot(212, axisbg='r')
# add a polar subplot
fig.add_subplot(111, projection='polar')
# add Subplot instance sub
fig.add_subplot(sub)
*kwargs* are legal :class:`~matplotlib.axes.Axes` kwargs plus
*projection*, which chooses a projection type for the axes.
(For backward compatibility, *polar=True* may also be
provided, which is equivalent to *projection='polar'*). Valid
values for *projection* are: %(projection_names)s. Some of
these projections
support additional *kwargs*, which may be provided to
:meth:`add_axes`.
The :class:`~matplotlib.axes.Axes` instance will be returned.
If the figure already has a subplot with key (*args*,
*kwargs*) then it will simply make that subplot current and
return it.
.. seealso:: :meth:`~matplotlib.pyplot.subplot` for an
explanation of the args.
The following kwargs are supported:
%(Axes)s
"""
if not len(args):
return
if len(args) == 1 and isinstance(args[0], int):
args = tuple([int(c) for c in str(args[0])])
if len(args) != 3:
raise ValueError("Integer subplot specification must " +
"be a three digit number. " +
"Not {n:d}".format(n=len(args)))
if isinstance(args[0], SubplotBase):
a = args[0]
assert(a.get_figure() is self)
# make a key for the subplot (which includes the axes object id
# in the hash)
key = self._make_key(*args, **kwargs)
else:
projection_class, kwargs, key = process_projection_requirements(
self, *args, **kwargs)
# try to find the axes with this key in the stack
ax = self._axstack.get(key)
if ax is not None:
if isinstance(ax, projection_class):
# the axes already existed, so set it as active & return
self.sca(ax)
return ax
else:
# Undocumented convenience behavior:
# subplot(111); subplot(111, projection='polar')
# will replace the first with the second.
# Without this, add_subplot would be simpler and
# more similar to add_axes.
self._axstack.remove(ax)
a = subplot_class_factory(projection_class)(self, *args, **kwargs)
self._axstack.add(key, a)
self.sca(a)
return a
def clf(self, keep_observers=False):
"""
Clear the figure.
Set *keep_observers* to True if, for example,
a gui widget is tracking the axes in the figure.
"""
self.suppressComposite = None
self.callbacks = cbook.CallbackRegistry()
for ax in tuple(self.axes): # Iterate over the copy.
ax.cla()
self.delaxes(ax) # removes ax from self._axstack
toolbar = getattr(self.canvas, 'toolbar', None)
if toolbar is not None:
toolbar.update()
self._axstack.clear()
self.artists = []
self.lines = []
self.patches = []
self.texts = []
self.images = []
self.legends = []
if not keep_observers:
self._axobservers = []
self._suptitle = None
def clear(self):
"""
Clear the figure -- synonym for :meth:`clf`.
"""
self.clf()
@allow_rasterization
def draw(self, renderer):
"""
Render the figure using :class:`matplotlib.backend_bases.RendererBase`
instance *renderer*.
"""
# draw the figure bounding box, perhaps none for white figure
if not self.get_visible():
return
renderer.open_group('figure')
if self.get_tight_layout() and self.axes:
try:
self.tight_layout(renderer, **self._tight_parameters)
except ValueError:
pass
# ValueError can occur when resizing a window.
if self.frameon:
self.patch.draw(renderer)
# a list of (zorder, func_to_call, list_of_args)
dsu = []
for a in self.patches:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.lines:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.artists:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
# override the renderer default if self.suppressComposite
# is not None
not_composite = renderer.option_image_nocomposite()
if self.suppressComposite is not None:
not_composite = self.suppressComposite
if (len(self.images) <= 1 or not_composite or
not cbook.allequal([im.origin for im in self.images])):
for a in self.images:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
else:
# make a composite image blending alpha
# list of (_image.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag), im.ox, im.oy, im.get_alpha())
for im in self.images]
im = _image.from_images(self.bbox.height * mag,
self.bbox.width * mag,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
def draw_composite():
gc = renderer.new_gc()
gc.set_clip_rectangle(self.bbox)
gc.set_clip_path(self.get_clip_path())
renderer.draw_image(gc, l, b, im)
gc.restore()
dsu.append((self.images[0].get_zorder(), self.images[0],
draw_composite, []))
# render the axes
for a in self.axes:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
# render the figure text
for a in self.texts:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
for a in self.legends:
dsu.append((a.get_zorder(), a, a.draw, [renderer]))
dsu = [row for row in dsu if not row[1].get_animated()]
dsu.sort(key=itemgetter(0))
for zorder, a, func, args in dsu:
func(*args)
renderer.close_group('figure')
self._cachedRenderer = renderer
self.canvas.draw_event(renderer)
def draw_artist(self, a):
"""
draw :class:`matplotlib.artist.Artist` instance *a* only --
this is available only after the figure is drawn
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def get_axes(self):
return self.axes
def legend(self, handles, labels, *args, **kwargs):
"""
Place a legend in the figure. Labels are a sequence of
strings, handles is a sequence of
:class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances, and loc can be a
string or an integer specifying the legend location
USAGE::
legend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right')
The *loc* location codes are::
'best' : 0, (currently not supported for figure legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
*loc* can also be an (x,y) tuple in figure coords, which
specifies the lower left of the legend box. figure coords are
(0,0) is the left, bottom of the figure and 1,1 is the right,
top.
Keyword arguments:
*prop*: [ *None* | FontProperties | dict ]
A :class:`matplotlib.font_manager.FontProperties`
instance. If *prop* is a dictionary, a new instance will be
created with *prop*. If *None*, use rc settings.
*numpoints*: integer
The number of points in the legend line, default is 4
*scatterpoints*: integer
The number of marker points shown in the legend entry created for a
scatter plot
*scatteryoffsets*: list of floats
a list of yoffsets for scatter symbols in legend
*markerscale*: [ *None* | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*fancybox*: [ *None* | *False* | *True* ]
if *True*, draw a frame with a round fancybox. If *None*, use rc
*shadow*: [ *None* | *False* | *True* ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*ncol* : integer
number of columns. default is 1
*mode* : [ "expand" | *None* ]
if mode is "expand", the legend will be horizontally expanded
to fill the axes area (or *bbox_to_anchor*)
*title* : string
the legend title
Padding and spacing between various elements use following keywords
parameters. The dimensions of these values are given as a fraction
of the fontsize. Values from rcParams will be used if None.
================ ====================================================
Keyword Description
================ ====================================================
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ====================================================
.. Note:: Not all kinds of artist are supported by the legend.
See LINK (FIXME) for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/figlegend_demo.py
"""
l = Legend(self, handles, labels, *args, **kwargs)
self.legends.append(l)
l._remove_method = lambda h: self.legends.remove(h)
return l
@docstring.dedent_interpd
def text(self, x, y, s, *args, **kwargs):
"""
Add text to figure.
Call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text to figure at location *x*, *y* (relative 0-1
coords). See :func:`~matplotlib.pyplot.text` for the meaning
of the other arguments.
kwargs control the :class:`~matplotlib.text.Text` properties:
%(Text)s
"""
override = _process_text_args({}, *args, **kwargs)
t = Text(x=x, y=y, text=s)
t.update(override)
self._set_artist_props(t)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
return t
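# Typical usage (illustrative sketch; assumes an existing Figure ``fig``):
#
#     fig.text(0.5, 0.02, 'Figure-wide caption', ha='center', fontsize=10)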
def _set_artist_props(self, a):
if a != self:
a.set_figure(self)
a.set_transform(self.transFigure)
@docstring.dedent_interpd
def gca(self, **kwargs):
"""
Get the current axes, creating one if necessary
The following kwargs are supported for ensuring the returned axes
adheres to the given projection etc., and for axes creation if
the active axes does not exist:
%(Axes)s
"""
ckey, cax = self._axstack.current_key_axes()
# if there exists an axes on the stack, see if it matches
# the desired axes configuration
if cax is not None:
# if no kwargs are given just return the current axes
# this is a convenience for gca() on axes such as polar etc.
if not kwargs:
return cax
# if the user has specified particular projection detail
# then build up a key which can represent this
else:
# we don't want to modify the original kwargs
# so take a copy so that we can do what we like to it
kwargs_copy = kwargs.copy()
projection_class, _, key = process_projection_requirements(
self, **kwargs_copy)
# let the returned axes have any gridspec by removing it from
# the key
ckey = ckey[1:]
key = key[1:]
# if the cax matches this key then return the axes, otherwise
# continue and a new axes will be created
if key == ckey and isinstance(cax, projection_class):
return cax
# no axes found, so create one which spans the figure
return self.add_subplot(1, 1, 1, **kwargs)
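# Typical usage (illustrative sketch; assumes an existing Figure ``fig``):
#
#     ax = fig.gca(projection='polar')   # reuses the current axes only if it is polar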
def sca(self, a):
'Set the current axes to be a and return a'
self._axstack.bubble(a)
for func in self._axobservers:
func(self)
return a
def _gci(self):
"""
helper for :func:`~matplotlib.pyplot.gci`;
do not use elsewhere.
"""
# Look first for an image in the current Axes:
cax = self._axstack.current_key_axes()[1]
if cax is None:
return None
im = cax._gci()
if im is not None:
return im
# If there is no image in the current Axes, search for
# one in a previously created Axes. Whether this makes
# sense is debatable, but it is the documented behavior.
for ax in reversed(self.axes):
im = ax._gci()
if im is not None:
return im
return None
def __getstate__(self):
state = self.__dict__.copy()
# the axobservers cannot currently be pickled.
# Additionally, the canvas cannot currently be pickled, but this has
# the benefit of meaning that a figure can be detached from one canvas,
# and re-attached to another.
for attr_to_pop in ('_axobservers', 'show',
'canvas', '_cachedRenderer'):
state.pop(attr_to_pop, None)
# add version information to the state
state['__mpl_version__'] = _mpl_version
# check to see if the figure has a manager and whether it is registered
# with pyplot
if getattr(self.canvas, 'manager', None) is not None:
manager = self.canvas.manager
import matplotlib._pylab_helpers
if manager in list(six.itervalues(
matplotlib._pylab_helpers.Gcf.figs)):
state['_restore_to_pylab'] = True
return state
def __setstate__(self, state):
version = state.pop('__mpl_version__')
restore_to_pylab = state.pop('_restore_to_pylab', False)
if version != _mpl_version:
import warnings
warnings.warn("This figure was saved with matplotlib version %s "
"and is unlikely to function correctly." %
(version, ))
self.__dict__ = state
# re-initialise some of the unstored state information
self._axobservers = []
self.canvas = None
if restore_to_pylab:
# lazy import to avoid circularity
import matplotlib.pyplot as plt
import matplotlib._pylab_helpers as pylab_helpers
allnums = plt.get_fignums()
num = max(allnums) + 1 if allnums else 1
mgr = plt._backend_mod.new_figure_manager_given_figure(num, self)
# XXX The following is a copy and paste from pyplot. Consider
# factoring to pylab_helpers
if self.get_label():
mgr.set_window_title(self.get_label())
# make this figure current on button press event
def make_active(event):
pylab_helpers.Gcf.set_active(mgr)
mgr._cidgcf = mgr.canvas.mpl_connect('button_press_event',
make_active)
pylab_helpers.Gcf.set_active(mgr)
self.number = num
plt.draw_if_interactive()
def add_axobserver(self, func):
'whenever the axes state change, ``func(self)`` will be called'
self._axobservers.append(func)
def savefig(self, *args, **kwargs):
"""
Save the current figure.
Call signature::
savefig(fname, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
The output formats available depend on the backend being used.
Arguments:
*fname*:
A string containing a path to a filename, or a Python
file-like object, or possibly some backend-dependent object
such as :class:`~matplotlib.backends.backend_pdf.PdfPages`.
If *format* is *None* and *fname* is a string, the output
format is deduced from the extension of the filename. If
the filename has no extension, the value of the rc parameter
``savefig.format`` is used.
If *fname* is not a string, remember to specify *format* to
ensure that the correct backend is used.
Keyword arguments:
*dpi*: [ *None* | ``scalar > 0`` ]
The resolution in dots per inch. If *None* it will default to
the value ``savefig.dpi`` in the matplotlibrc file.
*facecolor*, *edgecolor*:
the colors of the figure rectangle
*orientation*: [ 'landscape' | 'portrait' ]
not supported on all backends; currently only on postscript output
*papertype*:
One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
'a10', 'b0' through 'b10'. Only supported for postscript
output.
*format*:
One of the file extensions supported by the active
backend. Most backends support png, pdf, ps, eps and svg.
*transparent*:
If *True*, the axes patches will all be transparent; the
figure patch will also be transparent unless facecolor
and/or edgecolor are specified via kwargs.
This is useful, for example, for displaying
a plot on top of a colored background on a web page. The
transparency of these patches will be restored to their
original values upon exit of this function.
*frameon*:
If *True*, the figure patch will be colored, if *False*, the
figure background will be transparent. If not provided, the
rcParam 'savefig.frameon' will be used.
*bbox_inches*:
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure.
*pad_inches*:
Amount of padding around the figure when bbox_inches is
'tight'.
*bbox_extra_artists*:
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
kwargs.setdefault('dpi', rcParams['savefig.dpi'])
frameon = kwargs.pop('frameon', rcParams['savefig.frameon'])
transparent = kwargs.pop('transparent',
rcParams['savefig.transparent'])
if transparent:
kwargs.setdefault('facecolor', 'none')
kwargs.setdefault('edgecolor', 'none')
original_axes_colors = []
for ax in self.axes:
patch = ax.patch
original_axes_colors.append((patch.get_facecolor(),
patch.get_edgecolor()))
patch.set_facecolor('none')
patch.set_edgecolor('none')
else:
kwargs.setdefault('facecolor', rcParams['savefig.facecolor'])
kwargs.setdefault('edgecolor', rcParams['savefig.edgecolor'])
if frameon:
original_frameon = self.get_frameon()
self.set_frameon(frameon)
self.canvas.print_figure(*args, **kwargs)
if frameon:
self.set_frameon(original_frameon)
if transparent:
for ax, cc in zip(self.axes, original_axes_colors):
ax.patch.set_facecolor(cc[0])
ax.patch.set_edgecolor(cc[1])
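# Typical usage (illustrative sketch; assumes an existing Figure ``fig`` and
# that the chosen backend supports png/pdf output):
#
#     fig.savefig('figure.png', dpi=150, bbox_inches='tight', pad_inches=0.05)
#     fig.savefig('figure.pdf', transparent=True)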
@docstring.dedent_interpd
def colorbar(self, mappable, cax=None, ax=None, use_gridspec=True, **kw):
"""
Create a colorbar for a ScalarMappable instance, *mappable*.
Documentation for the pylab thin wrapper:
%(colorbar_doc)s
"""
if ax is None:
ax = self.gca()
# Store the value of gca so that we can set it back later on.
current_ax = self.gca()
if cax is None:
if use_gridspec and isinstance(ax, SubplotBase):
cax, kw = cbar.make_axes_gridspec(ax, **kw)
else:
cax, kw = cbar.make_axes(ax, **kw)
cax.hold(True)
cb = cbar.colorbar_factory(cax, mappable, **kw)
self.sca(current_ax)
return cb
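# Typical usage (illustrative sketch; ``ax`` and ``data`` are placeholders for
# an existing axes and a 2-D array):
#
#     im = ax.imshow(data)
#     cb = fig.colorbar(im, ax=ax)
#     cb.set_label('intensity')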
def subplots_adjust(self, *args, **kwargs):
"""
Call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Update the :class:`SubplotParams` with *kwargs* (defaulting to rc when
*None*) and update the subplot locations
"""
self.subplotpars.update(*args, **kwargs)
for ax in self.axes:
if not isinstance(ax, SubplotBase):
# Check if sharing a subplots axis
if (ax._sharex is not None and
isinstance(ax._sharex, SubplotBase)):
ax._sharex.update_params()
ax.set_position(ax._sharex.figbox)
elif (ax._sharey is not None and
isinstance(ax._sharey, SubplotBase)):
ax._sharey.update_params()
ax.set_position(ax._sharey.figbox)
else:
ax.update_params()
ax.set_position(ax.figbox)
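# Typical usage (illustrative sketch; assumes an existing Figure ``fig``):
#
#     fig.subplots_adjust(left=0.1, right=0.95, bottom=0.12, top=0.92,
#                         wspace=0.3, hspace=0.4)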
def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1,
mouse_pop=3, mouse_stop=2):
"""
Call signature::
ginput(self, n=1, timeout=30, show_clicks=True,
mouse_add=1, mouse_pop=3, mouse_stop=2)
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is zero or negative, does not timeout.
If *n* is zero or negative, accumulate clicks until a middle click
(or potentially both mouse buttons at once) terminates the input.
Right clicking cancels last input.
The buttons used for the various actions (adding points, removing
points, terminating the inputs) can be overridden via the
arguments *mouse_add*, *mouse_pop* and *mouse_stop*, that give
the associated mouse button: 1 for left, 2 for middle, 3 for
right.
The keyboard can also be used to select points in case your mouse
does not have one or more of the buttons. The delete and backspace
keys act like right clicking (i.e., remove last point), the enter key
terminates input and any other key (not already used by the window
manager) selects a point.
"""
blocking_mouse_input = BlockingMouseInput(self,
mouse_add=mouse_add,
mouse_pop=mouse_pop,
mouse_stop=mouse_stop)
return blocking_mouse_input(n=n, timeout=timeout,
show_clicks=show_clicks)
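# Typical usage (illustrative sketch; requires an interactive GUI backend and
# an existing Figure ``fig``):
#
#     pts = fig.ginput(n=4, timeout=60)   # list of four (x, y) data coordinates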
def waitforbuttonpress(self, timeout=-1):
"""
Call signature::
waitforbuttonpress(self, timeout=-1)
Blocking call to interact with the figure.
This will return True if a key was pressed, False if a mouse
button was pressed and None if *timeout* was reached without
either being pressed.
If *timeout* is negative, does not timeout.
"""
blocking_input = BlockingKeyMouseInput(self)
return blocking_input(timeout=timeout)
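# Typical usage (illustrative sketch; assumes an existing Figure ``fig``):
#
#     pressed_key = fig.waitforbuttonpress(timeout=10)
#     # True -> key pressed, False -> mouse button pressed, None -> timed out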
def get_default_bbox_extra_artists(self):
bbox_artists = [artist for artist in self.get_children()
if artist.get_visible()]
for ax in self.axes:
if ax.get_visible():
bbox_artists.extend(ax.get_default_bbox_extra_artists())
# we don't want the figure's patch to influence the bbox calculation
bbox_artists.remove(self.patch)
return bbox_artists
def get_tightbbox(self, renderer):
"""
Return a (tight) bounding box of the figure in inches.
It only accounts for the axes title, axis labels, and axis
ticklabels. Needs improvement.
"""
bb = []
for ax in self.axes:
if ax.get_visible():
bb.append(ax.get_tightbbox(renderer))
if len(bb) == 0:
return self.bbox_inches
_bbox = Bbox.union([b for b in bb if b.width != 0 or b.height != 0])
bbox_inches = TransformedBbox(_bbox,
Affine2D().scale(1. / self.dpi))
return bbox_inches
def tight_layout(self, renderer=None, pad=1.08, h_pad=None,
w_pad=None, rect=None):
"""
Adjust subplot parameters to give specified padding.
Parameters:
*pad* : float
padding between the figure edge and the edges of subplots,
as a fraction of the font-size.
*h_pad*, *w_pad* : float
padding (height/width) between edges of adjacent subplots.
Defaults to *pad*.
*rect* : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
from .tight_layout import (get_renderer, get_tight_layout_figure,
get_subplotspec_list)
subplotspec_list = get_subplotspec_list(self.axes)
if None in subplotspec_list:
warnings.warn("This figure includes Axes that are not "
"compatible with tight_layout, so its "
"results might be incorrect.")
if renderer is None:
renderer = get_renderer(self)
kwargs = get_tight_layout_figure(self, self.axes, subplotspec_list,
renderer,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=rect)
self.subplots_adjust(**kwargs)
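# Typical usage (illustrative sketch; assumes an existing Figure ``fig``):
#
#     fig.suptitle('Overview')
#     fig.tight_layout(pad=0.5, rect=(0, 0, 1, 0.95))   # keep the top 5% for the suptitle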
def figaspect(arg):
"""
Create a figure with specified aspect ratio. If *arg* is a number,
use that aspect ratio. If *arg* is an array, figaspect will
determine the width and height for a figure that would fit array
preserving aspect ratio. The figure width, height in inches are
returned. Be sure to create an axes with equal width and height,
e.g.,
Example usage::
# make a figure twice as tall as it is wide
w, h = figaspect(2.)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
# make a figure with the proper aspect for an array
A = rand(5,3)
w, h = figaspect(A)
fig = Figure(figsize=(w,h))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.imshow(A, **kwargs)
Thanks to Fernando Perez for this function
"""
isarray = hasattr(arg, 'shape')
# min/max sizes to respect when autoscaling. If John likes the idea, they
# could become rc parameters, for now they're hardwired.
figsize_min = np.array((4.0, 2.0)) # min length for width/height
figsize_max = np.array((16.0, 16.0)) # max length for width/height
#figsize_min = rcParams['figure.figsize_min']
#figsize_max = rcParams['figure.figsize_max']
# Extract the aspect ratio of the array
if isarray:
nr, nc = arg.shape[:2]
arr_ratio = float(nr) / nc
else:
arr_ratio = float(arg)
# Height of user figure defaults
fig_height = rcParams['figure.figsize'][1]
# New size for the figure, keeping the aspect ratio of the caller
newsize = np.array((fig_height / arr_ratio, fig_height))
# Sanity checks, don't drop either dimension below figsize_min
newsize /= min(1.0, *(newsize / figsize_min))
# Avoid humongous windows as well
newsize /= max(1.0, *(newsize / figsize_max))
# Finally, if we have a really funky aspect ratio, break it but respect
# the min/max dimensions (we don't want figures 10 feet tall!)
newsize = np.clip(newsize, figsize_min, figsize_max)
return newsize
docstring.interpd.update(Figure=martist.kwdoc(Figure))
| lgpl-3.0 |
nok/sklearn-porter | tests/estimator/classifier/SVC/SVCJavaTest.py | 1 | 3668 | # -*- coding: utf-8 -*-
import unittest
from unittest import TestCase
import numpy as np
from sklearn.svm.classes import SVC
from tests.estimator.classifier.Classifier import Classifier
from tests.estimator.classifier.ExportedData import ExportedData
from tests.language.Java import Java
class SVCJavaTest(Java, Classifier, ExportedData, TestCase):
def setUp(self):
super(SVCJavaTest, self).setUp()
self.estimator = SVC(C=1., kernel='rbf', gamma=0.001, random_state=0)
def tearDown(self):
super(SVCJavaTest, self).tearDown()
def test_linear_kernel(self):
self.estimator = SVC(C=1., kernel='linear',
gamma=0.001, random_state=0)
self.load_iris_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
preds, ground_truth = [], []
for _ in range(self.TEST_N_RANDOM_FEATURE_SETS):
x = np.random.uniform(amin, amax, self.n_features)
preds.append(self.pred_in_custom(x))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
def test_sigmoid_kernel(self):
self.estimator = SVC(C=1., kernel='sigmoid',
gamma=0.001, random_state=0)
self.load_iris_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
preds, ground_truth = [], []
for _ in range(self.TEST_N_RANDOM_FEATURE_SETS):
x = np.random.uniform(amin, amax, self.n_features)
preds.append(self.pred_in_custom(x))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
def test_auto_gamma(self):
self.estimator = SVC(C=1., gamma='auto', random_state=0)
self.load_iris_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
preds, ground_truth = [], []
for _ in range(self.TEST_N_RANDOM_FEATURE_SETS):
x = np.random.uniform(amin, amax, self.n_features)
preds.append(self.pred_in_custom(x))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
@unittest.skip('The generated code would be too large.')
def test_existing_features__binary_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_random_features__binary_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_existing_features__digits_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_random_features__digits_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_rbf_kernel__binary_data__default(self):
pass
@unittest.skip('The generated code would be too large.')
def test_linear_kernel_w_binary_data(self):
pass
@unittest.skip('The generated code would be too large.')
def test_poly_kernel(self):
pass
@unittest.skip('The generated code would be too large.')
def test_poly_kernel_w_binary_data(self):
pass
@unittest.skip('The generated code would be too large.')
def test_sigmoid_kernel_w_binary_data(self):
pass
| mit |
jairideout/scikit-bio | skbio/stats/tests/test_power.py | 12 | 22512 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.stats import kruskal
from skbio.stats.power import (subsample_power,
subsample_paired_power,
_check_nans,
confidence_bound,
_calculate_power,
_compare_distributions,
_calculate_power_curve,
_check_subsample_power_inputs,
_identify_sample_groups,
_draw_paired_samples,
_get_min_size,
bootstrap_power_curve,
paired_subsamples
)
class PowerAnalysisTest(TestCase):
def setUp(self):
# Defines a testing functions
def test_meta(ids, meta, cat, div):
"""Checks thhe div metric with a kruskal wallis"""
out = [meta.loc[id_, div] for id_ in ids]
return kruskal(*out)[1]
def meta_f(x):
"""Applies `test_meta` to a result"""
return test_meta(x, self.meta, 'INT', 'DIV')
def f(x):
"""returns the p value of a kruskal wallis test"""
return kruskal(*x)[1]
self.test_meta = test_meta
self.f = f
self.meta_f = meta_f
self.num_p = 1
# Sets the random seed
np.random.seed(5)
# Sets up the distributions of data for use
self.s1 = np.arange(0, 10, 1)
# Sets up two distributions which will never be equal by a rank-sum
# test.
self.samps = [np.ones((10))/10., np.ones((10))]
self.pop = [np.arange(0, 10, 0.1), np.arange(0, 20, 0.2)]
# Sets up a vector of alpha values
self.alpha = np.power(10, np.array([-1, -1.301, -2, -3])).round(3)
# Sets up a vector of samples
self.num_samps = np.arange(10, 100, 10)
# Sets up a mapping file
meta = {'GW': {'INT': 'N', 'ABX': np.nan, 'DIV': 19.5, 'AGE': '30s',
'SEX': 'M'},
'CB': {'INT': 'Y', 'ABX': np.nan, 'DIV': 42.7, 'AGE': '30s',
'SEX': 'M'},
'WM': {'INT': 'N', 'ABX': 'N', 'DIV': 27.5, 'AGE': '20s',
'SEX': 'F'},
'MH': {'INT': 'Y', 'ABX': 'N', 'DIV': 62.3, 'AGE': '30s',
'SEX': 'F'},
'CD': {'INT': 'Y', 'ABX': 'Y', 'DIV': 36.4, 'AGE': '40s',
'SEX': 'F'},
'LF': {'INT': 'Y', 'ABX': 'N', 'DIV': 50.2, 'AGE': '20s',
'SEX': 'M'},
'PP': {'INT': 'N', 'ABX': 'Y', 'DIV': 10.8, 'AGE': '30s',
'SEX': 'F'},
'MM': {'INT': 'N', 'ABX': 'N', 'DIV': 55.6, 'AGE': '40s',
'SEX': 'F'},
'SR': {'INT': 'N', 'ABX': 'Y', 'DIV': 2.2, 'AGE': '20s',
'SEX': 'M'},
'TS': {'INT': 'N', 'ABX': 'Y', 'DIV': 16.1, 'AGE': '40s',
'SEX': 'M'},
'PC': {'INT': 'Y', 'ABX': 'N', 'DIV': 82.6, 'AGE': '40s',
'SEX': 'M'},
'NR': {'INT': 'Y', 'ABX': 'Y', 'DIV': 15.7, 'AGE': '20s',
'SEX': 'F'}}
self.meta = pd.DataFrame.from_dict(meta, orient='index')
self.meta_pairs = {0: [['GW', 'SR', 'TS'], ['CB', 'LF', 'PC']],
1: [['MM', 'PP', 'WM'], ['CD', 'MH', 'NR']]}
self.pair_index = np.array([0, 0, 0, 1, 1, 1])
self.counts = np.array([5, 15, 25, 35, 45])
self.powers = [np.array([[0.105, 0.137, 0.174, 0.208, 0.280],
[0.115, 0.135, 0.196, 0.204, 0.281],
[0.096, 0.170, 0.165, 0.232, 0.256],
[0.122, 0.157, 0.202, 0.250, 0.279],
[0.132, 0.135, 0.173, 0.203, 0.279]]),
np.array([[0.157, 0.345, 0.522, 0.639, 0.739],
[0.159, 0.374, 0.519, 0.646, 0.757],
[0.161, 0.339, 0.532, 0.634, 0.745],
[0.169, 0.372, 0.541, 0.646, 0.762],
[0.163, 0.371, 0.522, 0.648, 0.746]]),
np.array([[0.276, 0.626, 0.865, 0.927, 0.992],
[0.267, 0.667, 0.848, 0.937, 0.978],
[0.236, 0.642, 0.850, 0.935, 0.977],
[0.249, 0.633, 0.828, 0.955, 0.986],
[0.249, 0.663, 0.869, 0.951, 0.985]])]
self.power_alpha = 0.1
self.effects = np.array([0.15245, 0.34877, 0.55830])
self.bounds = np.array([0.01049, 0.00299, 0.007492])
self.labels = np.array(['Age', 'Intervenption', 'Antibiotics'])
self.cats = np.array(['AGE', 'INT', 'ABX'])
self.cat = "AGE"
self.control_cats = ['INT', 'ABX']
def test_subsample_power_defaults(self):
test_p, test_c = subsample_power(self.f, self.pop,
num_iter=10, num_runs=5)
self.assertEqual(test_p.shape, (5, 4))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_power_counts(self):
test_p, test_c = subsample_power(self.f,
samples=self.pop,
num_iter=10,
num_runs=2,
min_counts=5)
self.assertEqual(test_p.shape, (2, 5))
npt.assert_array_equal(np.arange(5, 50, 10), test_c)
def test_subsample_power_matches(self):
test_p, test_c = subsample_power(self.f,
samples=self.pop,
num_iter=10,
num_runs=5,
draw_mode="matched")
self.assertEqual(test_p.shape, (5, 4))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_power_multi_p(self):
test_p, test_c = subsample_power(lambda x: np.array([0.5, 0.5]),
samples=self.pop,
num_iter=10,
num_runs=5)
self.assertEqual(test_p.shape, (5, 4, 2))
npt.assert_array_equal(np.array([10, 20, 30, 40]), test_c)
def test_subsample_paired_power(self):
known_c = np.array([1, 2, 3, 4])
# Sets up the handling values
cat = 'INT'
control_cats = ['SEX']
# Tests for the control cats
test_p, test_c = subsample_paired_power(self.meta_f,
meta=self.meta,
cat=cat,
control_cats=control_cats,
counts_interval=1,
num_iter=10,
num_runs=2)
# Test the output shapes are sane
self.assertEqual(test_p.shape, (2, 4))
npt.assert_array_equal(known_c, test_c)
def test_subsample_paired_power_multi_p(self):
def f(x):
return np.array([0.5, 0.5, 0.005])
cat = 'INT'
control_cats = ['SEX']
# Tests for the control cats
test_p, test_c = subsample_paired_power(f,
meta=self.meta,
cat=cat,
control_cats=control_cats,
counts_interval=1,
num_iter=10,
num_runs=2)
self.assertEqual(test_p.shape, (2, 4, 3))
def test_check_nans_str(self):
self.assertTrue(_check_nans('string'))
def test_check_nans_num(self):
self.assertTrue(_check_nans(4.2))
def test__check_nans_nan(self):
self.assertFalse(_check_nans(np.nan))
def test__check_nans_clean_list(self):
self.assertTrue(_check_nans(['foo', 'bar'], switch=True))
def test__check_nans_list_nan(self):
self.assertFalse(_check_nans(['foo', np.nan], switch=True))
def test__check_str_error(self):
with self.assertRaises(TypeError):
_check_nans(self.f)
def test__get_min_size_strict(self):
known = 5
test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
True)
self.assertEqual(test, known)
def test__get_min_size_relaxed(self):
known = 5
test = _get_min_size(self.meta, 'INT', ['ABX', 'SEX'], ['Y', 'N'],
False)
self.assertEqual(known, test)
def test_confidence_bound_default(self):
# Sets the know confidence bound
known = 2.2830070
test = confidence_bound(self.s1)
npt.assert_almost_equal(test, known, 3)
def test_confidence_bound_df(self):
known = 2.15109
test = confidence_bound(self.s1, df=15)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_alpha(self):
known = 3.2797886
test = confidence_bound(self.s1, alpha=0.01)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_nan(self):
# Sets the value to test
samples = np.array([[4, 3.2, 3.05],
[2, 2.8, 2.95],
[5, 2.9, 3.07],
[1, 3.1, 2.93],
[3, np.nan, 3.00]])
# Sets the know value
known = np.array([2.2284, 0.2573, 0.08573])
# Tests the function
test = confidence_bound(samples, axis=0)
npt.assert_almost_equal(known, test, 3)
def test_confidence_bound_axis_none(self):
# Sets the value to test
samples = np.array([[4, 3.2, 3.05],
[2, 2.8, 2.95],
[5, 2.9, 3.07],
[1, 3.1, 2.93],
[3, np.nan, 3.00]])
# Sets the known value
known = 0.52852
# Tests the output
test = confidence_bound(samples, axis=None)
npt.assert_almost_equal(known, test, 3)
def test__calculate_power(self):
# Sets up the values to test
crit = 0.025
# Sets the known value
known = 0.5
# Calculates the test value
test = _calculate_power(self.alpha, crit)
# Checks the test value
npt.assert_almost_equal(known, test)
def test__calculate_power_n(self):
crit = 0.025
known = np.array([0.5, 0.5])
alpha = np.vstack((self.alpha, self.alpha))
test = _calculate_power(alpha, crit)
npt.assert_almost_equal(known, test)
def test__compare_distributions_sample_counts_error(self):
with self.assertRaises(ValueError):
_compare_distributions(self.f, [self.pop[0][:5], self.pop[1]], 1,
counts=25)
def test__compare_distributions_all_mode(self):
known = np.ones((100))*0.0026998
test = _compare_distributions(self.f, self.samps, 1, num_iter=100)
npt.assert_allclose(known, test, 5)
def test__compare_distributions_matched_mode(self):
# Sets the known value
known_mean = 0.162195
known_std = 0.121887
known_shape = (100,)
# Tests the sample value
test = _compare_distributions(self.f, self.pop, self.num_p,
mode='matched', num_iter=100)
npt.assert_allclose(known_mean, test.mean(), rtol=0.1, atol=0.02)
npt.assert_allclose(known_std, test.std(), rtol=0.1, atol=0.02)
self.assertEqual(known_shape, test.shape)
def test__compare_distributions_draw_mode(self):
draw_mode = 'Ultron'
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f, self.pop, draw_mode,
self.num_p)
def test__compare_distributions_multiple_returns(self):
known = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
def f(x):
return np.array([1, 2, 3])
test = _compare_distributions(f, self.pop, 3, mode='matched',
num_iter=3)
npt.assert_array_equal(known, test)
def test_check_subsample_power_inputs_matched_mode(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
samples=[np.ones((2)), np.ones((5))],
draw_mode="matched")
def test_check_subsample_power_inputs_counts(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
samples=[np.ones((3)), np.ones((5))],
min_counts=5,
counts_interval=1000,
max_counts=7)
def test_check_subsample_power_inputs_ratio(self):
with self.assertRaises(ValueError):
_check_subsample_power_inputs(self.f,
self.samps,
ratio=np.array([1, 2, 3]))
def test_check_subsample_power_inputs_test(self):
# Defines a test function
def test(x):
return 'Hello World!'
with self.assertRaises(TypeError):
_check_subsample_power_inputs(test, self.samps)
def test_check_sample_power_inputs(self):
# Defines the know returns
known_num_p = 1
known_ratio = np.ones((2))
known_counts = np.arange(2, 10, 2)
# Runs the code for the returns
test_ratio, test_num_p, test_counts = \
_check_subsample_power_inputs(self.f,
self.samps,
counts_interval=2,
max_counts=10)
# Checks the returns are sane
self.assertEqual(known_num_p, test_num_p)
npt.assert_array_equal(known_ratio, test_ratio)
npt.assert_array_equal(known_counts, test_counts)
def test__calculate_power_curve_ratio_error(self):
with self.assertRaises(ValueError):
_calculate_power_curve(self.f, self.pop, self.num_samps,
ratio=np.array([0.1, 0.2, 0.3]),
num_iter=100)
def test__calculate_power_curve_default(self):
# Sets the known output
known = np.array([0.509, 0.822, 0.962, 0.997, 1.000, 1.000, 1.000,
1.000, 1.000])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.01)
def test__calculate_power_curve_alpha(self):
# Sets the know output
known = np.array([0.31, 0.568, 0.842, 0.954, 0.995, 1.000, 1.000,
1.000, 1.000])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
alpha=0.01,
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
def test__calculate_power_curve_ratio(self):
# Sets the know output
known = np.array([0.096, 0.333, 0.493, 0.743, 0.824, 0.937, 0.969,
0.996, 0.998])
# Generates the test values
test = _calculate_power_curve(self.f,
self.pop,
self.num_samps,
ratio=np.array([0.25, 0.75]),
num_iter=100)
# Checks the samples returned sanely
npt.assert_allclose(test, known, rtol=0.1, atol=0.1)
def test_bootstrap_power_curve(self):
# Sets the known values
known_mean = np.array([0.500, 0.82, 0.965, 0.995, 1.000, 1.000,
1.000, 1.000, 1.000])
known_bound = np.array([0.03, 0.02, 0.01, 0.01, 0.00, 0.00, 0.00, 0.00,
0.00])
# Generates the test values
test_mean, test_bound = bootstrap_power_curve(self.f,
self.pop,
self.num_samps,
num_iter=100)
# Checks the function returned sanely
npt.assert_allclose(test_mean, known_mean, rtol=0.05, atol=0.05)
npt.assert_allclose(test_bound, known_bound, rtol=0.1, atol=0.01)
def test_paired_subsamples_default(self):
# Sets the known np.array set
known_array = [{'MM', 'SR', 'TS', 'GW', 'PP', 'WM'},
{'CD', 'LF', 'PC', 'CB', 'MH', 'NR'}]
# Gets the test value
cat = 'INT'
control_cats = ['SEX', 'AGE']
test_array = paired_subsamples(self.meta, cat, control_cats)
self.assertEqual(known_array[0], set(test_array[0]))
self.assertEqual(known_array[1], set(test_array[1]))
def test_paired_subsamples_break(self):
# Sets known np.array set
known_array = [np.array([]), np.array([])]
# Gets the test value
cat = 'ABX'
control_cats = ['SEX', 'AGE', 'INT']
test_array = paired_subsamples(self.meta, cat, control_cats)
npt.assert_array_equal(known_array, test_array)
def test_paired_subsample_undefined(self):
known_array = np.zeros((2, 0))
cat = 'INT'
order = ['Y', 'N']
control_cats = ['AGE', 'ABX', 'SEX']
test_array = paired_subsamples(self.meta, cat, control_cats,
order=order)
npt.assert_array_equal(test_array, known_array)
def test_paired_subsample_fewer(self):
# Set known value
known_array = {'PP', 'MH', 'CD', 'PC', 'TS', 'MM'}
# Sets up test values
cat = 'AGE'
order = ['30s', '40s']
control_cats = ['ABX']
test_array = paired_subsamples(self.meta, cat, control_cats,
order=order)
for v in test_array[0]:
self.assertTrue(v in known_array)
for v in test_array[1]:
self.assertTrue(v in known_array)
def test_paired_subsamples_not_strict(self):
known_array = [{'WM', 'MM', 'GW', 'SR', 'TS'},
{'LF', 'PC', 'CB', 'NR', 'CD'}]
# Gets the test values
cat = 'INT'
control_cats = ['ABX', 'AGE']
test_array = paired_subsamples(self.meta, cat, control_cats,
strict_match=False)
self.assertEqual(set(test_array[0]), known_array[0])
self.assertEqual(set(test_array[1]), known_array[1])
def test__identify_sample_groups(self):
# Defines the know values
known_pairs = {0: [['MM'], ['CD']],
1: [['SR'], ['LF']],
2: [['TS'], ['PC']],
3: [['GW'], ['CB']],
4: [['PP'], ['MH']],
5: [['WM'], ['NR']]}
known_index = np.array([0, 1, 2, 3, 4, 5])
test_pairs, test_index = _identify_sample_groups(self.meta,
'INT',
['SEX', 'AGE'],
order=['N', 'Y'],
strict_match=True)
self.assertEqual(known_pairs.keys(), test_pairs.keys())
self.assertEqual(sorted(known_pairs.values()),
sorted(test_pairs.values()))
npt.assert_array_equal(known_index, test_index)
def test__identify_sample_groups_not_strict(self):
# Defines the know values
known_pairs = {0: [['PP'], ['CD', 'NR']],
1: [['MM', 'WM'], ['MH']],
2: [['GW'], ['CB']]}
known_index = np.array([0, 1, 2])
test_pairs, test_index = _identify_sample_groups(self.meta,
'INT',
['SEX', 'ABX'],
order=['N', 'Y'],
strict_match=False)
self.assertEqual(known_pairs.keys(), test_pairs.keys())
self.assertEqual(sorted(known_pairs.values()),
sorted(test_pairs.values()))
npt.assert_array_equal(known_index, test_index)
def test__draw_paired_samples(self):
num_samps = 3
known_sets = [{'GW', 'SR', 'TS', 'MM', 'PP', 'WM'},
{'CB', 'LF', 'PC', 'CD', 'MH', 'NR'}]
test_samps = _draw_paired_samples(self.meta_pairs, self.pair_index,
num_samps)
for i, t in enumerate(test_samps):
self.assertTrue(set(t).issubset(known_sets[i]))
if __name__ == '__main__':
main()
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_noRot_inst/Geneva_noRot_inst_age2/peaks_reader.py | 33 | 2761 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#input files
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
# ---------------------------------------------------
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0  # leave non-positive log ratios at zero
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
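# Each row of max_values holds [peak log-ratio, flat index of the peak,
# hdens at the peak, phi at the peak], one row per emission line in the
# same order as `headers`.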
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
savetxt('peaks', max_values, delimiter='\t')
| gpl-2.0 |
loli/sklearn-ensembletrees | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
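# 2,000 samples are used for training; the remaining 10,000 are held out for
# testing, matching the setup of the Hastie et al. figure referenced above.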
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
mfjb/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
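    # Interpolate this fold's ROC curve onto the common mean_fpr grid so the
    # true positive rates can be averaged point-wise across folds.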
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
IndraVikas/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
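# Use 0 as the missing-value marker; the Imputer below is configured with
# missing_values=0 so exactly these entries get replaced by the feature mean.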
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
cwu2011/scikit-learn | sklearn/ensemble/tests/test_forest.py | 20 | 35216 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
# Check variable importances.
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
    assert_false(hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
classifier = RandomForestClassifier()
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y) | bsd-3-clause |
google-research/google-research | ravens/ravens/models/mdn_utils.py | 1 | 4950 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture Density Networks utilities."""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
EPS = 1e-12
def pick_max_mean(pi, mu, var):
"""Prediction as the mean of the most-weighted gaussian.
Args:
pi: (batch_size, num_gaussians)
mu: (batch_size, num_gaussians * d_out)
var: (batch_size, num_gaussians)
Returns:
(batch_size, d_out) NUMPY
"""
del var
mu = tf.reshape(mu, (tf.shape(mu)[0], tf.shape(pi)[1], -1))
d_out = tf.shape(mu)[-1]
batch_size, _ = pi.shape
prediction = np.zeros((batch_size, d_out))
argmax_pi = tf.argmax(pi, axis=1) # shape (batch_size)
for i in range(batch_size):
ith_argmax_pi = argmax_pi[i].numpy()
prediction[i] = mu[i, ith_argmax_pi]
return prediction
def sample_from_pdf(pi, mu, var, num_samples=1):
"""Prediction as a sample from the gaussian mixture.
Args:
pi: (batch_size, num_gaussians)
mu: (batch_size, num_gaussians * d_out)
var: (batch_size, num_gaussians)
num_samples: Number of samples to draw from the pdf.
Returns:
(batch_size, num_samples, d_out) NUMPY
"""
pi, mu, var = pi.numpy(), mu.numpy(), var.numpy()
# apply temperature?
# pi = pi**4 # apply temp
var = var**4
pi = pi * (1 / pi.sum(1)[Ellipsis, None])
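  # var**4 together with the renormalization of pi acts as a temperature-style
  # adjustment of the sampling distribution (cf. the commented-out pi**4 above);
  # pi is rescaled so the mixture weights sum to 1 for each sample.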
batch_size, k = pi.shape
mu = tf.reshape(mu, (tf.shape(mu)[0], tf.shape(pi)[1], -1))
d_out = tf.shape(mu)[-1]
samples = np.zeros((batch_size, num_samples, d_out))
for i in range(batch_size):
for j in range(num_samples):
idx = np.random.choice(range(k), p=pi[i])
draw = np.random.normal(mu[i, idx], np.sqrt(var[i, idx]))
samples[i, j] = draw
return samples
def multivar_gaussian_pdf(y, mu, var):
r"""Assumes covariance matrix is identity times variance.
i.e.
\Sigma = I \sigma^2
for \Sigma covariance matrix, \sigma std. deviation.
Args:
y: shape (batch_size, d)
mu: shape (batch_size, k, d)
var: shape (batch_size, k)
Returns:
float pdf value.
"""
# assert len(y.shape) == 2
# assert len(mu.shape) == 3
# assert len(var.shape) == 2
# assert tf.shape(y)[-1] == tf.shape(mu)[-1]
# assert tf.shape(mu)[1] == tf.shape(var)[-1]
# assert tf.shape(y)[0] == tf.shape(mu)[0]
# assert tf.shape(y)[0] == tf.shape(var)[0]
y = tf.expand_dims(y, 1)
d = mu.shape[-1]
dot_prod = tf.reduce_sum((y - mu)**2, (2)) # shape (batch_size, k)
exp_factor = tf.math.divide_no_nan(-1., (2. * (var))) * dot_prod
numerator = tf.math.exp(exp_factor) # shape (batch_size, k)
denominator = tf.math.sqrt((2 * np.pi * (var))**d)
return tf.math.multiply_no_nan(numerator, 1 / denominator)
def mdn_loss(y, mdn_predictions):
"""Mixture Density Network loss.
Args:
y: true "y", shape (batch_size, d_out)
mdn_predictions: tuple of:
- pi: (batch_size, num_gaussians)
- mu: (batch_size, num_gaussians * d_out)
- var: (batch_size, num_gaussians)
Returns:
loss, scalar
"""
pi, mu, var = mdn_predictions
mu = tf.reshape(mu, (tf.shape(mu)[0], tf.shape(pi)[-1], -1))
# mu now (batch_size, num_gaussians, d_out) shape
pdf = multivar_gaussian_pdf(y, mu, var)
# multiply with each pi and sum it
p = tf.multiply(
tf.clip_by_value(pdf, 1e-8, 1e8), tf.clip_by_value(pi, 1e-8, 1e8))
p = tf.reduce_sum(p, axis=1, keepdims=True)
p = -tf.math.log(tf.clip_by_value(p, 1e-8, 1e8))
# plot_mdn_predictions(y, mdn_predictions)
return tf.reduce_mean(p)
def plot_mdn_predictions(y, mdn_predictions):
"""Plot Mixture Density Network Predictions.
Args:
y: true "y", shape (batch_size, d_out)
mdn_predictions: tuple of:
- pi: (batch_size, num_gaussians)
- mu: (batch_size, num_gaussians * d_out)
- var: (batch_size, num_gaussians)
"""
_, ax = plt.subplots(1, 1)
pi, mu, var = mdn_predictions
n = 5
y = y[:n, :]
pi = pi[:n, :]
mu = mu[:n, :]
var = var[:n, :]
ax.cla()
ax.scatter(y[:, 0], y[:, 1])
mu = tf.reshape(mu, (-1, y.shape[-1]))
pi = tf.reshape(pi, (-1,))
pi = tf.clip_by_value(pi, 0.01, 1.0)
rgba_colors = np.zeros((len(pi), 4))
# for red the first column needs to be one
rgba_colors[:, 0] = 1.0
# the fourth column needs to be your alphas
rgba_colors[:, 3] = pi
ax.scatter(mu[:, 0], mu[:, 1], color=rgba_colors)
plt.draw()
plt.pause(0.001)
| apache-2.0 |
capitancambio/scikit-neuralnetwork | examples/bench_cifar10.py | 1 | 1322 | import cPickle
import numpy as np
def load(name):
with open(name, 'rb') as f:
return cPickle.load(f)
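# Assumes the pickled CIFAR-10 python batch files (data_batch_1 .. data_batch_3)
# are present in the current working directory.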
dataset1 = load('data_batch_1')
dataset2 = load('data_batch_2')
dataset3 = load('data_batch_3')
data_train = np.vstack([dataset1['data'], dataset2['data']])
labels_train = np.hstack([dataset1['labels'], dataset2['labels']])
data_train = data_train.astype('float') / 255.
labels_train = labels_train
data_test = dataset3['data'].astype('float') / 255.
labels_test = np.array(dataset3['labels'])
n_feat = data_train.shape[1]
n_targets = labels_train.max() + 1
import sys
import logging
logging.basicConfig(format="%(message)s", level=logging.DEBUG, stream=sys.stdout)
from sknn.mlp import MultiLayerPerceptronClassifier
net = MultiLayerPerceptronClassifier(
[("Rectifier", n_feat*2/3), ("Rectifier", n_feat*1/3), ("Linear", n_targets)],
n_iter=50,
n_stable=10,
learning_rate=0.005,
valid_size=0.1,
verbose=1)
net.fit(data_train, labels_train)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
expected = labels_test
predicted = net.predict(data_test)
print "Classification report for classifier %s:\n%s\n" % (
net, classification_report(expected, predicted))
print "Confusion matrix:\n%s" % confusion_matrix(expected, predicted) | bsd-3-clause |
mganeva/mantid | qt/python/mantidqt/widgets/workspacedisplay/matrix/io.py | 1 | 1540 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
from mantidqt.widgets.workspacedisplay.matrix.presenter import MatrixWorkspaceDisplay
from mantid.api import AnalysisDataService as ADS # noqa
class MatrixWorkspaceDisplayAttributes(object):
# WARNING: If you delete a tag from here instead of adding a new one, it will make old project files obsolete so
# just add an extra tag to the list e.g. ["InstrumentWidget", "IWidget"]
tags = ["MatrixWorkspaceDisplayView"]
class MatrixWorkspaceDisplayEncoder(MatrixWorkspaceDisplayAttributes):
def __init__(self):
super(MatrixWorkspaceDisplayEncoder, self).__init__()
@staticmethod
def encode(obj, _=None):
return {"workspace": obj.presenter.model._ws.name()}
@classmethod
def has_tag(cls, tag):
return tag in cls.tags
class MatrixWorkspaceDisplayDecoder(MatrixWorkspaceDisplayAttributes):
def __init__(self):
super(MatrixWorkspaceDisplayDecoder, self).__init__()
@staticmethod
def decode(obj_dic, _=None):
import matplotlib.pyplot as plt
pres = MatrixWorkspaceDisplay(ADS.retrieve(obj_dic["workspace"]), plot=plt)
return pres.container
@classmethod
def has_tag(cls, tag):
return tag in cls.tags
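# Minimal round-trip sketch (hypothetical: `display` is an open MatrixWorkspaceDisplay
# whose workspace "ws" is registered in the ADS):
#   encoded = MatrixWorkspaceDisplayEncoder().encode(display)   # -> {"workspace": "ws"}
#   container = MatrixWorkspaceDisplayDecoder().decode(encoded)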
| gpl-3.0 |
BigTone2009/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/LPC.py | 24 | 1191 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
import essentia.standard as ess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
lpc = ess.LPC(order=14)
N= 512
(fs, x) = UF.wavread('../../../sounds/soprano-E4.wav')
first = 20000
last = first+N
x1 = x[first:last]
X = fft(hamming(N)*x1)
mX = 20 * np.log10(abs(X[:N/2]))
coeff = lpc(x1)
Y = fft(coeff[0], N)
mY = 20 * np.log10(abs(Y[:N/2]))
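# Y is the spectrum of the LPC analysis (inverse) filter A(z); the spectral
# envelope is 1/|A(z)|, hence -mY is plotted against mX below.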
plt.figure(1, figsize=(9, 5))
plt.subplot(2,1,1)
plt.plot(np.arange(first, last)/float(fs), x[first:last], 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(x[first:last]), max(x[first:last])])
plt.title('x (soprano-E4.wav)')
plt.subplot(2,1,2)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), mX-max(mX), 'r', lw=1.5, label="mX")
plt.plot(np.arange(0, fs/2.0, fs/float(N)), -mY-max(-mY)-3, 'k', lw=1.5, label="mY")
plt.legend()
plt.axis([0, fs/2, -60, 3])
plt.title('mX + mY (LPC approximation)')
plt.tight_layout()
plt.savefig('LPC.png')
plt.show()
| agpl-3.0 |
openfisca/openfisca-qt | openfisca_qt/gui/spyder_widgets/dicteditor.py | 1 | 52998 | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
Dictionary Editor Widget and Dialog based on Qt
"""
#TODO: Multiple selection: open as many editors (array/dict/...) as necessary,
# at the same time
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
import os, sys, datetime
from ...gui.qt.QtGui import (QMessageBox, QTableView, QItemDelegate,
QLineEdit, QVBoxLayout, QWidget, QColor,
QDialog, QDateEdit, QDialogButtonBox, QMenu,
QInputDialog, QDateTimeEdit, QApplication,
QKeySequence)
from ...gui.qt.QtCore import (Qt, QModelIndex, QAbstractTableModel, SIGNAL,
SLOT, QDateTime, Signal)
from ...gui.qt.compat import to_qvariant, from_qvariant, getsavefilename
# Local import
from ...gui.baseconfig import _
from ...gui.config import get_icon, get_font
from ...gui.utils.misc import fix_reference_name
from ...gui.utils.qthelpers import add_actions, create_action, qapplication
from ...gui.widgets.dicteditorutils import (sort_against, get_size,
get_human_readable_type, value_to_display, get_color_name,
is_known_type, FakeObject, Image, ndarray, array, MaskedArray,
unsorted_unique, try_to_eval, datestr_to_datetime,
get_numpy_dtype, is_editable_type)
if ndarray is not FakeObject:
from ...gui.widgets.arrayeditor import ArrayEditor
from ...gui.widgets.texteditor import TextEditor
from ...gui.widgets.importwizard import ImportWizard
def display_to_value(value, default_value, ignore_errors=True):
"""Convert back to value"""
value = from_qvariant(value, unicode)
try:
np_dtype = get_numpy_dtype(default_value)
if isinstance(default_value, bool):
# We must test for boolean before NumPy data types
# because `bool` class derives from `int` class
try:
value = bool(float(value))
except ValueError:
value = value.lower() == "true"
elif np_dtype is not None:
if 'complex' in str(type(default_value)):
value = np_dtype(complex(value))
else:
value = np_dtype(value)
elif isinstance(default_value, str):
value = str(value)
elif isinstance(default_value, unicode):
value = unicode(value)
elif isinstance(default_value, complex):
value = complex(value)
elif isinstance(default_value, float):
value = float(value)
elif isinstance(default_value, int):
try:
value = int(value)
except ValueError:
value = float(value)
elif isinstance(default_value, datetime.datetime):
value = datestr_to_datetime(value)
elif isinstance(default_value, datetime.date):
value = datestr_to_datetime(value).date()
elif ignore_errors:
value = try_to_eval(value)
else:
value = eval(value)
except (ValueError, SyntaxError):
if ignore_errors:
value = try_to_eval(value)
else:
raise
return value
class ProxyObject(object):
"""Dictionary proxy to an unknown object"""
def __init__(self, obj):
self.__obj__ = obj
def __len__(self):
return len(dir(self.__obj__))
def __getitem__(self, key):
return getattr(self.__obj__, key)
def __setitem__(self, key, value):
setattr(self.__obj__, key, value)
class ReadOnlyDictModel(QAbstractTableModel):
"""DictEditor Read-Only Table Model"""
def __init__(self, parent, data, title="", names=False,
truncate=True, minmax=False, collvalue=True, remote=False):
QAbstractTableModel.__init__(self, parent)
if data is None:
data = {}
self.names = names
self.truncate = truncate
self.minmax = minmax
self.collvalue = collvalue
self.remote = remote
self.header0 = None
self._data = None
self.showndata = None
self.keys = None
self.title = unicode(title) # in case title is not a string
if self.title:
self.title = self.title + ' - '
self.sizes = None
self.types = None
self.set_data(data)
def get_data(self):
"""Return model data"""
return self._data
def set_data(self, data, dictfilter=None):
"""Set model data"""
self._data = data
if dictfilter is not None and not self.remote and\
isinstance(data, (tuple, list, dict)):
data = dictfilter(data)
self.showndata = data
self.header0 = _("Index")
if self.names:
self.header0 = _("Name")
if isinstance(data, tuple):
self.keys = range(len(data))
self.title += _("Tuple")
elif isinstance(data, list):
self.keys = range(len(data))
self.title += _("List")
elif isinstance(data, dict):
self.keys = data.keys()
self.title += _("Dictionary")
if not self.names:
self.header0 = _("Key")
else:
self.keys = dir(data)
self._data = data = self.showndata = ProxyObject(data)
self.title += _("Object")
if not self.names:
self.header0 = _("Attribute")
self.title += ' ('+str(len(self.keys))+' '+ _("elements")+')'
if self.remote:
self.sizes = [ data[self.keys[index]]['size']
for index in range(len(self.keys)) ]
self.types = [ data[self.keys[index]]['type']
for index in range(len(self.keys)) ]
else:
self.sizes = [ get_size(data[self.keys[index]])
for index in range(len(self.keys)) ]
self.types = [ get_human_readable_type(data[self.keys[index]])
for index in range(len(self.keys)) ]
self.reset()
def sort(self, column, order=Qt.AscendingOrder):
"""Overriding sort method"""
reverse = (order==Qt.DescendingOrder)
if column == 0:
self.sizes = sort_against(self.sizes, self.keys, reverse)
self.types = sort_against(self.types, self.keys, reverse)
self.keys.sort(reverse=reverse)
elif column == 1:
self.keys = sort_against(self.keys, self.types, reverse)
self.sizes = sort_against(self.sizes, self.types, reverse)
self.types.sort(reverse=reverse)
elif column == 2:
self.keys = sort_against(self.keys, self.sizes, reverse)
self.types = sort_against(self.types, self.sizes, reverse)
self.sizes.sort(reverse=reverse)
elif column == 3:
self.keys = sort_against(self.keys, self.sizes, reverse)
self.types = sort_against(self.types, self.sizes, reverse)
self.sizes.sort(reverse=reverse)
elif column == 4:
values = [self._data[key] for key in self.keys]
self.keys = sort_against(self.keys, values, reverse)
self.sizes = sort_against(self.sizes, values, reverse)
self.types = sort_against(self.types, values, reverse)
self.reset()
def columnCount(self, qindex=QModelIndex()):
"""Array column number"""
return 4
def rowCount(self, qindex=QModelIndex()):
"""Array row number"""
return len(self.keys)
def get_index_from_key(self, key):
try:
return self.createIndex(self.keys.index(key), 0)
except ValueError:
return QModelIndex()
def get_key(self, index):
"""Return current key"""
return self.keys[index.row()]
def get_value(self, index):
"""Return current value"""
if index.column() == 0:
return self.keys[ index.row() ]
elif index.column() == 1:
return self.types[ index.row() ]
elif index.column() == 2:
return self.sizes[ index.row() ]
else:
return self._data[ self.keys[index.row()] ]
def get_bgcolor(self, index):
"""Background color depending on value"""
if index.column() == 0:
color = QColor(Qt.lightGray)
color.setAlphaF(.05)
elif index.column() < 3:
color = QColor(Qt.lightGray)
color.setAlphaF(.2)
else:
color = QColor(Qt.lightGray)
color.setAlphaF(.3)
return color
def data(self, index, role=Qt.DisplayRole):
"""Cell content"""
if not index.isValid():
return to_qvariant()
value = self.get_value(index)
if index.column() == 3 and self.remote:
value = value['view']
display = value_to_display(value,
truncate=index.column() == 3 and self.truncate,
minmax=self.minmax,
collvalue=self.collvalue or index.column() != 3)
if role == Qt.DisplayRole:
return to_qvariant(display)
elif role == Qt.EditRole:
return to_qvariant(value_to_display(value))
elif role == Qt.TextAlignmentRole:
if index.column() == 3:
if len(display.splitlines()) < 3:
return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))
else:
return to_qvariant(int(Qt.AlignLeft|Qt.AlignTop))
else:
return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))
elif role == Qt.BackgroundColorRole:
return to_qvariant( self.get_bgcolor(index) )
elif role == Qt.FontRole:
if index.column() < 3:
return to_qvariant(get_font('dicteditor_header'))
else:
return to_qvariant(get_font('dicteditor'))
return to_qvariant()
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Overriding method headerData"""
if role != Qt.DisplayRole:
if role == Qt.FontRole:
return to_qvariant(get_font('dicteditor_header'))
else:
return to_qvariant()
i_column = int(section)
if orientation == Qt.Horizontal:
headers = (self.header0, _("Type"), _("Size"), _("Value"))
return to_qvariant( headers[i_column] )
else:
return to_qvariant()
def flags(self, index):
"""Overriding method flags"""
# This method was implemented in DictModel only, but to enable tuple
# exploration (even without editing), this method was moved here
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
class DictModel(ReadOnlyDictModel):
"""DictEditor Table Model"""
def set_value(self, index, value):
"""Set value"""
self._data[ self.keys[index.row()] ] = value
self.showndata[ self.keys[index.row()] ] = value
self.sizes[index.row()] = get_size(value)
self.types[index.row()] = get_human_readable_type(value)
def get_bgcolor(self, index):
"""Background color depending on value"""
value = self.get_value(index)
if index.column() < 3:
color = ReadOnlyDictModel.get_bgcolor(self, index)
else:
if self.remote:
color_name = value['color']
else:
color_name = get_color_name(value)
color = QColor(color_name)
color.setAlphaF(.2)
return color
def setData(self, index, value, role=Qt.EditRole):
"""Cell content change"""
if not index.isValid():
return False
if index.column() < 3:
return False
value = display_to_value(value, self.get_value(index),
ignore_errors=True)
self.set_value(index, value)
self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"),
index, index)
return True
class DictDelegate(QItemDelegate):
"""DictEditor Item Delegate"""
def __init__(self, parent=None, inplace=False):
QItemDelegate.__init__(self, parent)
self.inplace = inplace
self._editors = {} # keep references on opened editors
def get_value(self, index):
if index.isValid():
return index.model().get_value(index)
def set_value(self, index, value):
if index.isValid():
index.model().set_value(index, value)
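    # Unless in-place editing is forced, createEditor below dispatches on the
    # value's type: lists, tuples and dicts open a nested DictEditor dialog,
    # numpy arrays and PIL images are shown in an ArrayEditor, datetimes get
    # QDateTimeEdit/QDateEdit widgets, long strings open a TextEditor, short
    # editable values are edited in the cell with a QLineEdit, and any other
    # object is inspected with a DictEditor.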
def createEditor(self, parent, option, index):
"""Overriding method createEditor"""
if index.column() < 3:
return None
try:
value = self.get_value(index)
except Exception, msg:
QMessageBox.critical(self.parent(), _("Edit item"),
_("<b>Unable to retrieve data.</b>"
"<br><br>Error message:<br>%s"
) % unicode(msg))
return
key = index.model().get_key(index)
readonly = isinstance(value, tuple) or self.parent().readonly \
or not is_known_type(value)
#---editor = DictEditor
if isinstance(value, (list, tuple, dict)) and not self.inplace:
editor = DictEditor()
editor.setup(value, key, icon=self.parent().windowIcon(),
readonly=readonly)
self.create_dialog(editor, dict(model=index.model(), editor=editor,
key=key, readonly=readonly))
return None
#---editor = ArrayEditor
elif isinstance(value, (ndarray, MaskedArray))\
and ndarray is not FakeObject and not self.inplace:
if value.size == 0:
return None
editor = ArrayEditor(parent)
if not editor.setup_and_check(value, title=key, readonly=readonly):
return
self.create_dialog(editor, dict(model=index.model(), editor=editor,
key=key, readonly=readonly))
return None
#---showing image
elif isinstance(value, Image) and ndarray is not FakeObject \
and Image is not FakeObject:
arr = array(value)
if arr.size == 0:
return None
editor = ArrayEditor(parent)
if not editor.setup_and_check(arr, title=key, readonly=readonly):
return
conv_func = lambda arr: Image.fromarray(arr, mode=value.mode)
self.create_dialog(editor, dict(model=index.model(), editor=editor,
key=key, readonly=readonly,
conv=conv_func))
return None
#---editor = QDateTimeEdit
elif isinstance(value, datetime.datetime) and not self.inplace:
editor = QDateTimeEdit(value, parent)
editor.setCalendarPopup(True)
editor.setFont(get_font('dicteditor'))
self.connect(editor, SIGNAL("returnPressed()"),
self.commitAndCloseEditor)
return editor
#---editor = QDateEdit
elif isinstance(value, datetime.date) and not self.inplace:
editor = QDateEdit(value, parent)
editor.setCalendarPopup(True)
editor.setFont(get_font('dicteditor'))
self.connect(editor, SIGNAL("returnPressed()"),
self.commitAndCloseEditor)
return editor
#---editor = QTextEdit
elif isinstance(value, (str, unicode)) and len(value)>40:
editor = TextEditor(value, key)
self.create_dialog(editor, dict(model=index.model(), editor=editor,
key=key, readonly=readonly))
return None
#---editor = QLineEdit
elif self.inplace or is_editable_type(value):
editor = QLineEdit(parent)
editor.setFont(get_font('dicteditor'))
editor.setAlignment(Qt.AlignLeft)
self.connect(editor, SIGNAL("returnPressed()"),
self.commitAndCloseEditor)
return editor
#---editor = DictEditor for an arbitrary object
else:
editor = DictEditor()
editor.setup(value, key, icon=self.parent().windowIcon(),
readonly=readonly)
self.create_dialog(editor, dict(model=index.model(), editor=editor,
key=key, readonly=readonly))
return None
def create_dialog(self, editor, data):
self._editors[id(editor)] = data
self.connect(editor, SIGNAL('accepted()'),
lambda eid=id(editor): self.editor_accepted(eid))
self.connect(editor, SIGNAL('rejected()'),
lambda eid=id(editor): self.editor_rejected(eid))
editor.show()
def editor_accepted(self, editor_id):
data = self._editors[editor_id]
if not data['readonly']:
index = data['model'].get_index_from_key(data['key'])
value = data['editor'].get_value()
conv_func = data.get('conv', lambda v: v)
self.set_value(index, conv_func(value))
self._editors.pop(editor_id)
def editor_rejected(self, editor_id):
self._editors.pop(editor_id)
def commitAndCloseEditor(self):
"""Overriding method commitAndCloseEditor"""
editor = self.sender()
self.emit(SIGNAL("commitData(QWidget*)"), editor)
self.emit(SIGNAL("closeEditor(QWidget*)"), editor)
def setEditorData(self, editor, index):
"""Overriding method setEditorData
Model --> Editor"""
value = self.get_value(index)
if isinstance(editor, QLineEdit):
if not isinstance(value, basestring):
value = repr(value)
editor.setText(value)
elif isinstance(editor, QDateEdit):
editor.setDate(value)
elif isinstance(editor, QDateTimeEdit):
editor.setDateTime(QDateTime(value.date(), value.time()))
def setModelData(self, editor, model, index):
"""Overriding method setModelData
Editor --> Model"""
if not hasattr(model, "set_value"):
# Read-only mode
return
if isinstance(editor, QLineEdit):
value = editor.text()
try:
value = display_to_value(to_qvariant(value),
self.get_value(index),
ignore_errors=False)
except Exception, msg:
raise
QMessageBox.critical(editor, _("Edit item"),
_("<b>Unable to assign data to item.</b>"
"<br><br>Error message:<br>%s"
) % str(msg))
return
elif isinstance(editor, QDateEdit):
qdate = editor.date()
value = datetime.date( qdate.year(), qdate.month(), qdate.day() )
elif isinstance(editor, QDateTimeEdit):
qdatetime = editor.dateTime()
qdate = qdatetime.date()
qtime = qdatetime.time()
value = datetime.datetime( qdate.year(), qdate.month(),
qdate.day(), qtime.hour(),
qtime.minute(), qtime.second() )
else:
# Should not happen...
raise RuntimeError("Unsupported editor widget")
self.set_value(index, value)
class BaseTableView(QTableView):
"""Base dictionnary editor table view"""
sig_option_changed = Signal(str, object)
def __init__(self, parent):
QTableView.__init__(self, parent)
self.array_filename = None
self.menu = None
self.empty_ws_menu = None
self.paste_action = None
self.copy_action = None
self.edit_action = None
self.plot_action = None
self.hist_action = None
self.imshow_action = None
self.save_array_action = None
self.insert_action = None
self.remove_action = None
self.truncate_action = None
self.minmax_action = None
self.collvalue_action = None
self.inplace_action = None
self.rename_action = None
self.duplicate_action = None
self.delegate = None
def setup_table(self):
"""Setup table"""
self.horizontalHeader().setStretchLastSection(True)
self.adjust_columns()
# Sorting columns
self.setSortingEnabled(True)
self.sortByColumn(0, Qt.AscendingOrder)
def setup_menu(self, truncate, minmax, inplace, collvalue):
"""Setup context menu"""
if self.truncate_action is not None:
self.truncate_action.setChecked(truncate)
self.minmax_action.setChecked(minmax)
self.inplace_action.setChecked(inplace)
self.collvalue_action.setChecked(collvalue)
return
resize_action = create_action(self, _("Resize rows to contents"),
triggered=self.resizeRowsToContents)
self.paste_action = create_action(self, _("Paste"),
icon=get_icon('editpaste.png'),
triggered=self.paste)
self.copy_action = create_action(self, _("Copy"),
icon=get_icon('editcopy.png'),
triggered=self.copy)
self.edit_action = create_action(self, _("Edit"),
icon=get_icon('edit.png'),
triggered=self.edit_item)
self.plot_action = create_action(self, _("Plot"),
icon=get_icon('plot.png'),
triggered=lambda: self.plot_item('plot'))
self.plot_action.setVisible(False)
self.hist_action = create_action(self, _("Histogram"),
icon=get_icon('hist.png'),
triggered=lambda: self.plot_item('hist'))
self.hist_action.setVisible(False)
self.imshow_action = create_action(self, _("Show image"),
icon=get_icon('imshow.png'),
triggered=self.imshow_item)
self.imshow_action.setVisible(False)
self.save_array_action = create_action(self, _("Save array"),
icon=get_icon('filesave.png'),
triggered=self.save_array)
self.save_array_action.setVisible(False)
self.insert_action = create_action(self, _("Insert"),
icon=get_icon('insert.png'),
triggered=self.insert_item)
self.remove_action = create_action(self, _("Remove"),
icon=get_icon('editdelete.png'),
triggered=self.remove_item)
self.truncate_action = create_action(self, _("Truncate values"),
toggled=self.toggle_truncate)
self.truncate_action.setChecked(truncate)
self.toggle_truncate(truncate)
self.minmax_action = create_action(self, _("Show arrays min/max"),
toggled=self.toggle_minmax)
self.minmax_action.setChecked(minmax)
self.toggle_minmax(minmax)
self.collvalue_action = create_action(self,
_("Show collection contents"),
toggled=self.toggle_collvalue)
self.collvalue_action.setChecked(collvalue)
self.toggle_collvalue(collvalue)
self.inplace_action = create_action(self, _("Always edit in-place"),
toggled=self.toggle_inplace)
self.inplace_action.setChecked(inplace)
if self.delegate is None:
self.inplace_action.setEnabled(False)
else:
self.toggle_inplace(inplace)
self.rename_action = create_action(self, _( "Rename"),
icon=get_icon('rename.png'),
triggered=self.rename_item)
self.duplicate_action = create_action(self, _( "Duplicate"),
icon=get_icon('edit_add.png'),
triggered=self.duplicate_item)
menu = QMenu(self)
menu_actions = [self.edit_action, self.plot_action, self.hist_action,
self.imshow_action, self.save_array_action,
self.insert_action, self.remove_action,
self.copy_action, self.paste_action,
None, self.rename_action,self.duplicate_action,
None, resize_action, None, self.truncate_action,
self.inplace_action, self.collvalue_action]
if ndarray is not FakeObject:
menu_actions.append(self.minmax_action)
add_actions(menu, menu_actions)
self.empty_ws_menu = QMenu(self)
add_actions(self.empty_ws_menu,
[self.insert_action, self.paste_action,
None, resize_action])
return menu
#------ Remote/local API ---------------------------------------------------
def remove_values(self, keys):
"""Remove values from data"""
raise NotImplementedError
def copy_value(self, orig_key, new_key):
"""Copy value"""
raise NotImplementedError
def new_value(self, key, value):
"""Create new value in data"""
raise NotImplementedError
def is_list(self, key):
"""Return True if variable is a list or a tuple"""
raise NotImplementedError
def get_len(self, key):
"""Return sequence length"""
raise NotImplementedError
def is_array(self, key):
"""Return True if variable is a numpy array"""
raise NotImplementedError
def is_image(self, key):
"""Return True if variable is a PIL.Image image"""
raise NotImplementedError
def is_dict(self, key):
"""Return True if variable is a dictionary"""
raise NotImplementedError
def get_array_shape(self, key):
"""Return array's shape"""
raise NotImplementedError
def get_array_ndim(self, key):
"""Return array's ndim"""
raise NotImplementedError
def oedit(self, key):
"""Edit item"""
raise NotImplementedError
def plot(self, key, funcname):
"""Plot item"""
raise NotImplementedError
def imshow(self, key):
"""Show item's image"""
raise NotImplementedError
def show_image(self, key):
"""Show image (item is a PIL image)"""
raise NotImplementedError
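    # The NotImplementedError stubs above define the local/remote API:
    # DictEditorTableView (below) implements them directly on the underlying
    # dict, while RemoteDictEditorTableView receives equivalent callables at
    # construction time so the same table view can browse variables living in
    # a remote process (e.g. through Spyder's monitor socket).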
#---------------------------------------------------------------------------
def refresh_menu(self):
"""Refresh context menu"""
index = self.currentIndex()
condition = index.isValid()
self.edit_action.setEnabled( condition )
self.remove_action.setEnabled( condition )
self.refresh_plot_entries(index)
def refresh_plot_entries(self, index):
if index.isValid():
key = self.model.get_key(index)
is_list = self.is_list(key)
is_array = self.is_array(key) and self.get_len(key) != 0
condition_plot = (is_array and len(self.get_array_shape(key)) <= 2)
condition_hist = (is_array and self.get_array_ndim(key) == 1)
condition_imshow = condition_plot and self.get_array_ndim(key) == 2
condition_imshow = condition_imshow or self.is_image(key)
else:
is_array = condition_plot = condition_imshow = is_list \
= condition_hist = False
self.plot_action.setVisible(condition_plot or is_list)
self.hist_action.setVisible(condition_hist or is_list)
self.imshow_action.setVisible(condition_imshow)
self.save_array_action.setVisible(is_array)
def adjust_columns(self):
"""Resize two first columns to contents"""
for col in range(3):
self.resizeColumnToContents(col)
def set_data(self, data):
"""Set table data"""
if data is not None:
self.model.set_data(data, self.dictfilter)
self.sortByColumn(0, Qt.AscendingOrder)
def mousePressEvent(self, event):
"""Reimplement Qt method"""
if event.button() != Qt.LeftButton:
QTableView.mousePressEvent(self, event)
return
index_clicked = self.indexAt(event.pos())
if index_clicked.isValid():
if index_clicked == self.currentIndex() \
and index_clicked in self.selectedIndexes():
self.clearSelection()
else:
QTableView.mousePressEvent(self, event)
else:
self.clearSelection()
event.accept()
def mouseDoubleClickEvent(self, event):
"""Reimplement Qt method"""
index_clicked = self.indexAt(event.pos())
if index_clicked.isValid():
self.edit_item()
else:
event.accept()
def keyPressEvent(self, event):
"""Reimplement Qt methods"""
if event.key() == Qt.Key_Delete:
self.remove_item()
elif event.key() == Qt.Key_F2:
self.rename_item()
elif event == QKeySequence.Copy:
self.copy()
elif event == QKeySequence.Paste:
self.paste()
else:
QTableView.keyPressEvent(self, event)
def contextMenuEvent(self, event):
"""Reimplement Qt method"""
if self.model.showndata:
self.refresh_menu()
self.menu.popup(event.globalPos())
event.accept()
else:
self.empty_ws_menu.popup(event.globalPos())
event.accept()
def toggle_inplace(self, state):
"""Toggle in-place editor option"""
self.sig_option_changed.emit('inplace', state)
self.delegate.inplace = state
def toggle_truncate(self, state):
"""Toggle display truncating option"""
self.sig_option_changed.emit('truncate', state)
self.model.truncate = state
def toggle_minmax(self, state):
"""Toggle min/max display for numpy arrays"""
self.sig_option_changed.emit('minmax', state)
self.model.minmax = state
def toggle_collvalue(self, state):
"""Toggle value display for collections"""
self.sig_option_changed.emit('collvalue', state)
self.model.collvalue = state
def edit_item(self):
"""Edit item"""
index = self.currentIndex()
if not index.isValid():
return
self.edit(index)
def remove_item(self):
"""Remove item"""
indexes = self.selectedIndexes()
if not indexes:
return
for index in indexes:
if not index.isValid():
return
one = _("Do you want to remove selected item?")
more = _("Do you want to remove all selected items?")
answer = QMessageBox.question(self, _( "Remove"),
one if len(indexes) == 1 else more,
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
idx_rows = unsorted_unique(map(lambda idx: idx.row(), indexes))
keys = [ self.model.keys[idx_row] for idx_row in idx_rows ]
self.remove_values(keys)
def copy_item(self, erase_original=False):
"""Copy item"""
indexes = self.selectedIndexes()
if not indexes:
return
idx_rows = unsorted_unique(map(lambda idx: idx.row(), indexes))
if len(idx_rows) > 1 or not indexes[0].isValid():
return
orig_key = self.model.keys[idx_rows[0]]
new_key, valid = QInputDialog.getText(self, _( 'Rename'), _( 'Key:'),
QLineEdit.Normal,orig_key)
if valid and unicode(new_key):
new_key = try_to_eval(unicode(new_key))
if new_key == orig_key:
return
self.copy_value(orig_key, new_key)
if erase_original:
self.remove_values([orig_key])
def duplicate_item(self):
"""Duplicate item"""
self.copy_item()
def rename_item(self):
"""Rename item"""
self.copy_item(True)
def insert_item(self):
"""Insert item"""
index = self.currentIndex()
if not index.isValid():
row = self.model.rowCount()
else:
row = index.row()
data = self.model.get_data()
if isinstance(data, list):
key = row
data.insert(row, '')
elif isinstance(data, dict):
key, valid = QInputDialog.getText(self, _( 'Insert'), _( 'Key:'),
QLineEdit.Normal)
if valid and unicode(key):
key = try_to_eval(unicode(key))
else:
return
else:
return
value, valid = QInputDialog.getText(self, _('Insert'), _('Value:'),
QLineEdit.Normal)
if valid and unicode(value):
self.new_value(key, try_to_eval(unicode(value)))
def __prepare_plot(self):
try:
import guiqwt.pyplot #analysis:ignore
return True
except ImportError:
try:
if 'matplotlib' not in sys.modules:
import matplotlib
matplotlib.use("Qt4Agg")
return True
except ImportError:
QMessageBox.warning(self, _("Import error"),
_("Please install <b>matplotlib</b>"
" or <b>guiqwt</b>."))
def plot_item(self, funcname):
"""Plot item"""
index = self.currentIndex()
if self.__prepare_plot():
key = self.model.get_key(index)
try:
self.plot(key, funcname)
except (ValueError, TypeError), error:
QMessageBox.critical(self, _( "Plot"),
_("<b>Unable to plot data.</b>"
"<br><br>Error message:<br>%s"
) % str(error))
def imshow_item(self):
"""Imshow item"""
index = self.currentIndex()
if self.__prepare_plot():
key = self.model.get_key(index)
try:
if self.is_image(key):
self.show_image(key)
else:
self.imshow(key)
except (ValueError, TypeError), error:
QMessageBox.critical(self, _( "Plot"),
_("<b>Unable to show image.</b>"
"<br><br>Error message:<br>%s"
) % str(error))
def save_array(self):
"""Save array"""
title = _( "Save array")
if self.array_filename is None:
self.array_filename = os.getcwdu()
self.emit(SIGNAL('redirect_stdio(bool)'), False)
filename, _selfilter = getsavefilename(self, title,
self.array_filename,
_("NumPy arrays")+" (*.npy)")
self.emit(SIGNAL('redirect_stdio(bool)'), True)
if filename:
self.array_filename = filename
data = self.delegate.get_value( self.currentIndex() )
try:
import numpy as np
np.save(self.array_filename, data)
except Exception, error:
QMessageBox.critical(self, title,
_("<b>Unable to save array</b>"
"<br><br>Error message:<br>%s"
) % str(error))
def copy(self):
"""Copy text to clipboard"""
clipboard = QApplication.clipboard()
clipl = []
for idx in self.selectedIndexes():
if not idx.isValid():
continue
clipl.append(unicode(self.delegate.get_value(idx)))
clipboard.setText(u'\n'.join(clipl))
def import_from_string(self, text, title=None):
"""Import data from string"""
data = self.model.get_data()
editor = ImportWizard(self, text, title=title,
contents_title=_("Clipboard contents"),
varname=fix_reference_name("data",
blacklist=data.keys()))
if editor.exec_():
var_name, clip_data = editor.get_data()
self.new_value(var_name, clip_data)
def paste(self):
"""Import text/data/code from clipboard"""
clipboard = QApplication.clipboard()
cliptext = u""
if clipboard.mimeData().hasText():
cliptext = unicode(clipboard.text())
if cliptext.strip():
self.import_from_string(cliptext, title=_("Import from clipboard"))
else:
QMessageBox.warning(self, _( "Empty clipboard"),
_("Nothing to be imported from clipboard."))
class DictEditorTableView(BaseTableView):
"""DictEditor table view"""
def __init__(self, parent, data, readonly=False, title="",
names=False, truncate=True, minmax=False,
inplace=False, collvalue=True):
BaseTableView.__init__(self, parent)
self.dictfilter = None
self.readonly = readonly or isinstance(data, tuple)
DictModelClass = ReadOnlyDictModel if self.readonly else DictModel
self.model = DictModelClass(self, data, title, names=names,
truncate=truncate, minmax=minmax,
collvalue=collvalue)
self.setModel(self.model)
self.delegate = DictDelegate(self, inplace=inplace)
self.setItemDelegate(self.delegate)
self.setup_table()
self.menu = self.setup_menu(truncate, minmax, inplace, collvalue)
#------ Remote/local API ---------------------------------------------------
def remove_values(self, keys):
"""Remove values from data"""
data = self.model.get_data()
for key in sorted(keys,reverse=True):
data.pop(key)
self.set_data(data)
def copy_value(self, orig_key, new_key):
"""Copy value"""
data = self.model.get_data()
data[new_key] = data[orig_key]
self.set_data(data)
def new_value(self, key, value):
"""Create new value in data"""
data = self.model.get_data()
data[key] = value
self.set_data(data)
def is_list(self, key):
"""Return True if variable is a list or a tuple"""
data = self.model.get_data()
return isinstance(data[key], (tuple, list))
def get_len(self, key):
"""Return sequence length"""
data = self.model.get_data()
return len(data[key])
def is_array(self, key):
"""Return True if variable is a numpy array"""
data = self.model.get_data()
return isinstance(data[key], (ndarray, MaskedArray))
def is_image(self, key):
"""Return True if variable is a PIL.Image image"""
data = self.model.get_data()
return isinstance(data[key], Image)
def is_dict(self, key):
"""Return True if variable is a dictionary"""
data = self.model.get_data()
return isinstance(data[key], dict)
def get_array_shape(self, key):
"""Return array's shape"""
data = self.model.get_data()
return data[key].shape
def get_array_ndim(self, key):
"""Return array's ndim"""
data = self.model.get_data()
return data[key].ndim
def oedit(self, key):
"""Edit item"""
data = self.model.get_data()
from ...gui.spyder_widgets.objecteditor import oedit
oedit(data[key])
def plot(self, key, funcname):
"""Plot item"""
data = self.model.get_data()
from ...gui import pyplot as plt
plt.figure()
getattr(plt, funcname)(data[key])
plt.show()
def imshow(self, key):
"""Show item's image"""
data = self.model.get_data()
from ...gui import pyplot as plt
plt.figure()
plt.imshow(data[key])
plt.show()
def show_image(self, key):
"""Show image (item is a PIL image)"""
data = self.model.get_data()
data[key].show()
#---------------------------------------------------------------------------
def refresh_menu(self):
"""Refresh context menu"""
data = self.model.get_data()
index = self.currentIndex()
condition = (not isinstance(data, tuple)) and index.isValid() \
and not self.readonly
self.edit_action.setEnabled( condition )
self.remove_action.setEnabled( condition )
self.insert_action.setEnabled( not self.readonly )
self.refresh_plot_entries(index)
def set_filter(self, dictfilter=None):
"""Set table dict filter"""
self.dictfilter = dictfilter
class DictEditorWidget(QWidget):
"""Dictionary Editor Dialog"""
def __init__(self, parent, data, readonly=False, title="", remote=False):
QWidget.__init__(self, parent)
if remote:
self.editor = RemoteDictEditorTableView(self, data, readonly)
else:
self.editor = DictEditorTableView(self, data, readonly, title)
layout = QVBoxLayout()
layout.addWidget(self.editor)
self.setLayout(layout)
def set_data(self, data):
"""Set DictEditor data"""
self.editor.set_data(data)
def get_title(self):
"""Get model title"""
return self.editor.model.title
class DictEditor(QDialog):
"""Dictionary/List Editor Dialog"""
def __init__(self, parent=None):
QDialog.__init__(self, parent)
        # Destroy the C++ object right after closing the dialog box;
# otherwise it may be garbage-collected in another QThread
# (e.g. the editor's analysis thread in Spyder), thus leading to
# a segmentation fault on UNIX or an application crash on Windows
self.setAttribute(Qt.WA_DeleteOnClose)
self.data_copy = None
self.widget = None
def setup(self, data, title='', readonly=False, width=500,
icon='dictedit.png', remote=False, parent=None):
if isinstance(data, dict):
            # dictionary
self.data_copy = data.copy()
datalen = len(data)
elif isinstance(data, (tuple, list)):
# list, tuple
self.data_copy = data[:]
datalen = len(data)
else:
# unknown object
import copy
self.data_copy = copy.deepcopy(data)
datalen = len(dir(data))
self.widget = DictEditorWidget(self, self.data_copy, title=title,
readonly=readonly, remote=remote)
layout = QVBoxLayout()
layout.addWidget(self.widget)
self.setLayout(layout)
# Buttons configuration
buttons = QDialogButtonBox.Ok
if not readonly:
buttons = buttons | QDialogButtonBox.Cancel
bbox = QDialogButtonBox(buttons)
self.connect(bbox, SIGNAL("accepted()"), SLOT("accept()"))
if not readonly:
self.connect(bbox, SIGNAL("rejected()"), SLOT("reject()"))
layout.addWidget(bbox)
constant = 121
row_height = 30
error_margin = 20
height = constant + row_height*min([20, datalen]) + error_margin
self.resize(width, height)
self.setWindowTitle(self.widget.get_title())
if isinstance(icon, (str, unicode)):
icon = get_icon(icon)
self.setWindowIcon(icon)
# Make the dialog act as a window
self.setWindowFlags(Qt.Window)
def get_value(self):
"""Return modified copy of dictionary or list"""
        # It is important to avoid accessing the Qt C++ object as it has probably
# already been destroyed, due to the Qt.WA_DeleteOnClose attribute
return self.data_copy
#----Remote versions of DictDelegate and DictEditorTableView
class RemoteDictDelegate(DictDelegate):
"""DictEditor Item Delegate"""
def __init__(self, parent=None, inplace=False,
get_value_func=None, set_value_func=None):
DictDelegate.__init__(self, parent, inplace=inplace)
self.get_value_func = get_value_func
self.set_value_func = set_value_func
def get_value(self, index):
if index.isValid():
name = index.model().keys[index.row()]
return self.get_value_func(name)
def set_value(self, index, value):
if index.isValid():
name = index.model().keys[index.row()]
self.set_value_func(name, value)
class RemoteDictEditorTableView(BaseTableView):
"""DictEditor table view"""
def __init__(self, parent, data, truncate=True, minmax=False,
inplace=False, collvalue=True, remote_editing=False,
get_value_func=None, set_value_func=None,
new_value_func=None, remove_values_func=None,
copy_value_func=None, is_list_func=None, get_len_func=None,
is_array_func=None, is_image_func=None, is_dict_func=None,
get_array_shape_func=None, get_array_ndim_func=None,
oedit_func=None, plot_func=None, imshow_func=None,
show_image_func=None):
BaseTableView.__init__(self, parent)
self.remote_editing_enabled = None
self.remove_values = remove_values_func
self.copy_value = copy_value_func
self.new_value = new_value_func
self.is_list = is_list_func
self.get_len = get_len_func
self.is_array = is_array_func
self.is_image = is_image_func
self.is_dict = is_dict_func
self.get_array_shape = get_array_shape_func
self.get_array_ndim = get_array_ndim_func
self.oedit = oedit_func
self.plot = plot_func
self.imshow = imshow_func
self.show_image = show_image_func
self.dictfilter = None
self.model = None
self.delegate = None
self.readonly = False
self.model = DictModel(self, data, names=True,
truncate=truncate, minmax=minmax,
collvalue=collvalue, remote=True)
self.setModel(self.model)
self.delegate = RemoteDictDelegate(self, inplace,
get_value_func, set_value_func)
self.setItemDelegate(self.delegate)
self.setup_table()
self.menu = self.setup_menu(truncate, minmax, inplace, collvalue,
remote_editing)
def setup_menu(self, truncate, minmax, inplace, collvalue, remote_editing):
"""Setup context menu"""
menu = BaseTableView.setup_menu(self, truncate, minmax,
inplace, collvalue)
if menu is None:
self.remote_editing_action.setChecked(remote_editing)
return
self.remote_editing_action = create_action(self,
_( "Edit data in the remote process"),
tip=_("Editors are opened in the remote process for NumPy "
"arrays, PIL images, lists, tuples and dictionaries.\n"
"This avoids transfering large amount of data between "
"the remote process and Spyder (through the socket)."),
toggled=self.toggle_remote_editing)
self.remote_editing_action.setChecked(remote_editing)
self.toggle_remote_editing(remote_editing)
add_actions(menu, (self.remote_editing_action,))
return menu
def toggle_remote_editing(self, state):
"""Toggle remote editing state"""
self.sig_option_changed.emit('remote_editing', state)
self.remote_editing_enabled = state
def oedit_possible(self, key):
if (self.is_list(key) or self.is_dict(key)
or self.is_array(key) or self.is_image(key)):
            # If this is a remote dict editor, the following avoids
            # transferring large amounts of data through the socket
return True
def edit_item(self):
"""
Reimplement BaseTableView's method to edit item
Some supported data types are directly edited in the remote process,
        thus avoiding the transfer of large amounts of data through the socket from
the remote process to Spyder
"""
if self.remote_editing_enabled:
index = self.currentIndex()
if not index.isValid():
return
key = self.model.get_key(index)
if self.oedit_possible(key):
                # If this is a remote dict editor, the following avoids
                # transferring large amounts of data through the socket
self.oedit(key)
else:
BaseTableView.edit_item(self)
else:
BaseTableView.edit_item(self)
def get_test_data():
"""Create test data"""
import numpy as np
# from spyderlib.pil_patch import Image
import Image
image = Image.fromarray(np.random.random_integers(255, size=(100, 100)))
testdict = {'d': 1, 'a': np.random.rand(10, 10), 'b': [1, 2]}
testdate = datetime.date(1945, 5, 8)
class Foobar(object):
def __init__(self):
self.text = "toto"
self.testdict = testdict
self.testdate = testdate
foobar = Foobar()
return {'object': foobar,
'str': 'kjkj kj k j j kj k jkj',
'unicode': u'éù',
'list': [1, 3, [sorted, 5, 6], 'kjkj', None],
'tuple': ([1, testdate, testdict], 'kjkj', None),
'dict': testdict,
'float': 1.2233,
'int': 223,
'bool': True,
'array': np.random.rand(10, 10),
'masked_array': np.ma.array([[1, 0], [1, 0]],
mask=[[True, False], [False, False]]),
'1D-array': np.linspace(-10, 10),
'empty_array': np.array([]),
'image': image,
'date': testdate,
'datetime': datetime.datetime(1945, 5, 8),
'complex': 2+1j,
'complex64': np.complex64(2+1j),
'int8_scalar': np.int8(8),
'int16_scalar': np.int16(16),
'int32_scalar': np.int32(32),
'bool_scalar': np.bool(8),
'unsupported1': np.arccos,
'unsupported2': np.cast,
1: (1, 2, 3), -5: ("a", "b", "c"), 2.5: np.array((4.0, 6.0, 8.0)),
}
def test():
"""Dictionary editor test"""
app = qapplication() #analysis:ignore
dialog = DictEditor()
dialog.setup(get_test_data())
dialog.show()
app.exec_()
print "out:", dialog.get_value()
def remote_editor_test():
"""Remote dictionary editor test"""
from ...gui.plugins.variableexplorer import VariableExplorer
from ...gui.spyder_widgets.externalshell.monitor import make_remote_view
remote = make_remote_view(get_test_data(), VariableExplorer.get_settings())
from pprint import pprint
pprint(remote)
app = qapplication()
    dialog = DictEditor()
    dialog.setup(remote, remote=True)
dialog.show()
app.exec_()
if dialog.result():
print dialog.get_value()
if __name__ == "__main__":
test()
| agpl-3.0 |
3manuek/scikit-learn | sklearn/kernel_ridge.py | 44 | 6504 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
        Gamma parameter for the RBF, polynomial, exponential, chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
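
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): KernelRidge.fit
# solves the regularized kernel system (K + alpha * I) dual_coef = y in
# closed form.  The helper below is a hypothetical demonstration of that
# equivalence for a plain linear kernel, a scalar alpha and no sample
# weights; the data and names are arbitrary.
def _closed_form_equivalence_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    alpha = 1.0

    model = KernelRidge(alpha=alpha, kernel="linear").fit(X, y)

    # Direct solution of (K + alpha * I) c = y with a linear kernel K = X X^T.
    K = np.dot(X, X.T)
    c = np.linalg.solve(K + alpha * np.eye(K.shape[0]), y)

    # Both routes should agree on the dual coefficients and the predictions.
    assert np.allclose(model.dual_coef_, c)
    assert np.allclose(model.predict(X), np.dot(K, c))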
| bsd-3-clause |
tardis-sn/tardis | tardis/analysis.py | 1 | 15410 | # Code to analyse the model.
import re
import os
from astropy import units as u
from tardis import constants
import numpy as np
import pandas as pd
class LastLineInteraction(object):
@classmethod
def from_model(cls, model, packet_filter_mode="packet_out_nu"):
return cls(
model.runner.last_line_interaction_in_id,
model.runner.last_line_interaction_out_id,
model.runner.last_line_interaction_shell_id,
model.runner.output_nu,
model.runner.last_interaction_in_nu,
model.plasma.atomic_data.lines,
packet_filter_mode,
)
def __init__(
self,
last_line_interaction_in_id,
last_line_interaction_out_id,
last_line_interaction_shell_id,
output_nu,
input_nu,
lines,
packet_filter_mode="packet_out_nu",
):
# mask out packets which did not perform a line interaction
# TODO mask out packets which do not escape to observer?
mask = last_line_interaction_out_id != -1
self.last_line_interaction_in_id = last_line_interaction_in_id[mask]
self.last_line_interaction_out_id = last_line_interaction_out_id[mask]
self.last_line_interaction_shell_id = last_line_interaction_shell_id[
mask
]
self.last_line_interaction_out_angstrom = u.Quantity(
output_nu[mask], "Hz"
).to(u.Angstrom, equivalencies=u.spectral())
self.last_line_interaction_in_angstrom = u.Quantity(
input_nu[mask], "Hz"
).to(u.Angstrom, equivalencies=u.spectral())
self.lines = lines
self._wavelength_start = 0 * u.angstrom
self._wavelength_end = np.inf * u.angstrom
self._atomic_number = None
self._ion_number = None
self.packet_filter_mode = packet_filter_mode
self.update_last_interaction_filter()
@property
def wavelength_start(self):
return self._wavelength_start.to("angstrom")
@wavelength_start.setter
def wavelength_start(self, value):
if not isinstance(value, u.Quantity):
raise ValueError("needs to be a Quantity")
self._wavelength_start = value
self.update_last_interaction_filter()
@property
def wavelength_end(self):
return self._wavelength_end.to("angstrom")
@wavelength_end.setter
def wavelength_end(self, value):
if not isinstance(value, u.Quantity):
raise ValueError("needs to be a Quantity")
self._wavelength_end = value
self.update_last_interaction_filter()
@property
def atomic_number(self):
return self._atomic_number
@atomic_number.setter
def atomic_number(self, value):
self._atomic_number = value
self.update_last_interaction_filter()
@property
def ion_number(self):
return self._ion_number
@ion_number.setter
def ion_number(self, value):
self._ion_number = value
self.update_last_interaction_filter()
def update_last_interaction_filter(self):
if self.packet_filter_mode == "packet_out_nu":
packet_filter = (
self.last_line_interaction_out_angstrom > self.wavelength_start
) & (self.last_line_interaction_out_angstrom < self.wavelength_end)
elif self.packet_filter_mode == "packet_in_nu":
packet_filter = (
self.last_line_interaction_in_angstrom > self.wavelength_start
) & (self.last_line_interaction_in_angstrom < self.wavelength_end)
elif self.packet_filter_mode == "line_in_nu":
line_in_nu = self.lines.wavelength.iloc[
self.last_line_interaction_in_id
].values
packet_filter = (
line_in_nu > self.wavelength_start.to(u.angstrom).value
) & (line_in_nu < self.wavelength_end.to(u.angstrom).value)
else:
raise ValueError(
"Invalid value of packet_filter_mode. The only values "
"allowed are: packet_out_nu, packet_in_nu, line_in_nu"
)
self.last_line_in = self.lines.iloc[
self.last_line_interaction_in_id[packet_filter]
]
self.last_line_out = self.lines.iloc[
self.last_line_interaction_out_id[packet_filter]
]
if self.atomic_number is not None:
self.last_line_in = self.last_line_in.xs(
self.atomic_number, level="atomic_number", drop_level=False
)
self.last_line_out = self.last_line_out.xs(
self.atomic_number, level="atomic_number", drop_level=False
)
if self.ion_number is not None:
self.last_line_in = self.last_line_in.xs(
self.ion_number, level="ion_number", drop_level=False
)
self.last_line_out = self.last_line_out.xs(
self.ion_number, level="ion_number", drop_level=False
)
last_line_in_count = self.last_line_in.line_id.value_counts()
last_line_out_count = self.last_line_out.line_id.value_counts()
self.last_line_in_table = self.last_line_in.reset_index()[
[
"wavelength",
"atomic_number",
"ion_number",
"level_number_lower",
"level_number_upper",
]
]
self.last_line_in_table["count"] = last_line_in_count
self.last_line_in_table.sort_values(
by="count", ascending=False, inplace=True
)
self.last_line_out_table = self.last_line_out.reset_index()[
[
"wavelength",
"atomic_number",
"ion_number",
"level_number_lower",
"level_number_upper",
]
]
self.last_line_out_table["count"] = last_line_out_count
self.last_line_out_table.sort_values(
by="count", ascending=False, inplace=True
)
def plot_wave_in_out(self, fig, do_clf=True, plot_resonance=True):
if do_clf:
fig.clf()
ax = fig.add_subplot(111)
        wave_in = self.last_line_in["wavelength"]
        wave_out = self.last_line_out["wavelength"]
if plot_resonance:
min_wave = np.min([wave_in.min(), wave_out.min()])
max_wave = np.max([wave_in.max(), wave_out.max()])
ax.plot([min_wave, max_wave], [min_wave, max_wave], "b-")
ax.plot(wave_in, wave_out, "b.", picker=True)
ax.set_xlabel("Last interaction Wave in")
ax.set_ylabel("Last interaction Wave out")
def onpick(event):
print("-" * 80)
print(
"Line_in (%d/%d):\n%s"
% (
len(event.ind),
                    len(self.last_line_in),
                    self.last_line_in.iloc[event.ind],
)
)
print("\n\n")
print(
"Line_out (%d/%d):\n%s"
% (
len(event.ind),
self.current_no_packets,
self.last_line_list_in.ix[event.ind],
)
)
print("^" * 80)
def onpress(event):
pass
fig.canvas.mpl_connect("pick_event", onpick)
fig.canvas.mpl_connect("on_press", onpress)
class TARDISHistory(object):
"""
Records the history of the model
"""
def __init__(self, hdf5_fname, iterations=None):
self.hdf5_fname = hdf5_fname
if iterations is None:
iterations = []
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
for key in hdf_store.keys():
if key.split("/")[1] == "atom_data":
continue
iterations.append(
int(re.match(r"model(\d+)", key.split("/")[1]).groups()[0])
)
self.iterations = np.sort(np.unique(iterations))
hdf_store.close()
else:
self.iterations = iterations
self.levels = None
self.lines = None
def load_atom_data(self):
if self.levels is None or self.lines is None:
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
self.levels = hdf_store["atom_data/levels"]
self.lines = hdf_store["atom_data/lines"]
hdf_store.close()
def load_t_inner(self, iterations=None):
t_inners = []
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
t_inners.append(
hdf_store["model%03d/configuration" % iter].ix["t_inner"]
)
hdf_store.close()
t_inners = np.array(t_inners)
return t_inners
def load_t_rads(self, iterations=None):
t_rads_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = "iter%03d" % iter
t_rads_dict[current_iter] = hdf_store["model%03d/t_rads" % iter]
t_rads = pd.DataFrame(t_rads_dict)
hdf_store.close()
return t_rads
def load_ws(self, iterations=None):
ws_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = f"iter{iter:03d}"
ws_dict[current_iter] = hdf_store[f"model{iter:03d}/ws"]
hdf_store.close()
return pd.DataFrame(ws_dict)
def load_level_populations(self, iterations=None):
level_populations_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
is_scalar = False
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
is_scalar = True
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = "iter%03d" % iter
level_populations_dict[current_iter] = hdf_store[
f"model{iter:03d}/level_populations"
]
hdf_store.close()
if is_scalar:
            return pd.DataFrame(list(level_populations_dict.values())[0])
else:
return pd.Panel(level_populations_dict)
def load_jblues(self, iterations=None):
jblues_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
is_scalar = False
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
is_scalar = True
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = f"iter{iter:03d}"
jblues_dict[current_iter] = hdf_store[f"model{iter:03d}/j_blues"]
hdf_store.close()
if is_scalar:
            return pd.DataFrame(list(jblues_dict.values())[0])
else:
return pd.Panel(jblues_dict)
def load_ion_populations(self, iterations=None):
ion_populations_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
is_scalar = False
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
is_scalar = True
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = f"iter{iter:03d}"
ion_populations_dict[current_iter] = hdf_store[
f"model{iter:03d}/ion_populations"
]
hdf_store.close()
if is_scalar:
            return pd.DataFrame(list(ion_populations_dict.values())[0])
else:
return pd.Panel(ion_populations_dict)
def load_spectrum(self, iteration, spectrum_keyword="luminosity_density"):
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
spectrum = hdf_store[
"model%03d/%s" % (self.iterations[iteration], spectrum_keyword)
]
hdf_store.close()
return spectrum
def calculate_relative_lte_level_populations(self, species, iteration=-1):
self.load_atom_data()
t_rads = self.load_t_rads(iteration)
beta_rads = 1 / (constants.k_B.cgs.value * t_rads.values[:, 0])
species_levels = self.levels.ix[species]
relative_lte_level_populations = (
species_levels.g.values[np.newaxis].T
/ float(species_levels.g.loc[0])
) * np.exp(-beta_rads * species_levels.energy.values[np.newaxis].T)
return pd.DataFrame(
relative_lte_level_populations, index=species_levels.index
)
def calculate_departure_coefficients(self, species, iteration=-1):
self.load_atom_data()
t_rads = self.load_t_rads(iteration)
beta_rads = 1 / (constants.k_B.cgs.value * t_rads.values[:, 0])
species_levels = self.levels.ix[species]
species_level_populations = self.load_level_populations(iteration).ix[
species
]
departure_coefficient = (
(species_level_populations.values * species_levels.g.ix[0])
/ (
species_level_populations.ix[0].values
* species_levels.g.values[np.newaxis].T
)
) * np.exp(beta_rads * species_levels.energy.values[np.newaxis].T)
return pd.DataFrame(departure_coefficient, index=species_levels.index)
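    # The expression above evaluates, per shell, the departure coefficient
    #     b_i = (n_i * g_0) / (n_0 * g_i) * exp(E_i / (k_B * T_rad)),
    # i.e. the ratio of the actual level population to the LTE population
    # implied by the ground state of the selected species.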
def get_last_line_interaction(self, iteration=-1):
iteration = self.iterations[iteration]
self.load_atom_data()
hdf_store = pd.HDFStore(self.hdf5_fname, "r")
model_string = "model" + ("%03d" % iteration) + "/%s"
last_line_interaction_in_id = hdf_store[
model_string % "last_line_interaction_in_id"
].values
last_line_interaction_out_id = hdf_store[
model_string % "last_line_interaction_out_id"
].values
last_line_interaction_shell_id = hdf_store[
model_string % "last_line_interaction_shell_id"
].values
try:
montecarlo_nu = hdf_store[
model_string % "montecarlo_nus_path"
].values
except KeyError:
montecarlo_nu = hdf_store[model_string % "montecarlo_nus"].values
hdf_store.close()
return LastLineInteraction(
last_line_interaction_in_id,
last_line_interaction_out_id,
last_line_interaction_shell_id,
montecarlo_nu,
self.lines,
)
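
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  It assumes a
# run was stored in "history.h5" with per-iteration groups named model000,
# model001, ... as expected by TARDISHistory above.
def _tardis_history_example():
    history = TARDISHistory("history.h5")
    t_inner = history.load_t_inner()      # one inner temperature per iteration
    t_rads = history.load_t_rads()        # DataFrame: shells x iterations
    ws = history.load_ws()                # dilution factors per iteration
    spectrum = history.load_spectrum(-1)  # spectrum of the last iteration
    return t_inner, t_rads, ws, spectrum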
| bsd-3-clause |
jereze/scikit-learn | sklearn/tree/tests/test_tree.py | 48 | 47506 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
        assert_almost_equal(clf.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth here.
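    # For a binary 0/1 target with a fraction p of positives in a node, the
    # MSE impurity is the variance p * (1 - p) while the gini impurity is
    # 2 * p * (1 - p); the two are proportional, so both criteria rank
    # candidate splits identically, which is what the checks below rely on.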
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test that each leaf contains at least min_samples_leaf training samples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees constrained to max_leaf_nodes = k + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Subsample the larger datasets to keep testing time down
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
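    # Explanatory note: reading the same (data, indices, indptr) arrays as CSR
    # instead of CSC yields the transpose of X (hence the square shape above),
    # so X_test keeps the same explicit zeros with rows and columns swapped.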
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
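# Explanatory note (added comment, not part of the original test logic): the
# reference implementation above follows the passive-aggressive updates of
# Crammer et al. (2006). For the "hinge" / "epsilon_insensitive" losses the
# step is the clipped PA-I rule, tau = min(C, loss / ||x||^2); the squared
# losses use the PA-II rule, tau = loss / (||x||^2 + 1 / (2 * C)). The weight
# vector is then moved by tau along x, signed by the label (classification)
# or by the sign of the residual (regression).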
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| mit |
michael-hoffman/titanic-revisited | titanic_MICE_RS_SVM.py | 1 | 5831 | # data analysis and wrangling
import pandas as pd
import numpy as np
import scipy
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
# machine learning
from sklearn.svm import SVC
from sklearn import preprocessing
import fancyimpute
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
# utility
from time import time
training_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
## Set of functions to transform features into more convenient format.
#
# Code performs three separate tasks:
# (1). Pull out the first letter of the cabin feature.
# Code taken from: https://www.kaggle.com/jeffd23/titanic/scikit-learn-ml-from-start-to-finish
# (2). Add a column containing a binary variable that indicates
#      whether the cabin feature is known or not.
#      (This may be relevant for Pclass = 1).
# (3). Recast the cabin feature as a number.
def simplify_cabins(data):
data.Cabin = data.Cabin.fillna('N')
data.Cabin = data.Cabin.apply(lambda x: x[0])
cabin_mapping = {'N': 0, 'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1,
'F': 1, 'G': 1, 'T': 1}
data['Cabin_Known'] = data.Cabin.map(cabin_mapping)
le = preprocessing.LabelEncoder().fit(data.Cabin)
data.Cabin = le.transform(data.Cabin)
return data
# Recast sex as numerical feature.
def simplify_sex(data):
sex_mapping = {'male': 0, 'female': 1}
data.Sex = data.Sex.map(sex_mapping).astype(int)
return data
# Recast port of departure as numerical feature.
def simplify_embark(data):
# Two missing values, assign the most common port of departure.
data.Embarked = data.Embarked.fillna('S')
le = preprocessing.LabelEncoder().fit(data.Embarked)
data.Embarked = le.transform(data.Embarked)
return data
# Extract title from names, then assign to one of five ordinal classes.
# Function based on code from: https://www.kaggle.com/startupsci/titanic/titanic-data-science-solutions
def add_title(data):
data['Title'] = data.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
data.Title = data.Title.replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major',
'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
data.Title = data.Title.replace('Mlle', 'Miss')
data.Title = data.Title.replace('Ms', 'Miss')
data.Title = data.Title.replace('Mme', 'Mrs')
# Map from strings to ordinal variables.
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
data.Title = data.Title.map(title_mapping)
data.Title = data.Title.fillna(0)
return data
# Drop all unwanted features (name, ticket).
def drop_features(data):
return data.drop(['Name', 'Ticket'], axis=1)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# Perform all feature transformations.
def transform_all(data):
data = simplify_cabins(data)
data = simplify_sex(data)
data = simplify_embark(data)
data = add_title(data)
data = drop_features(data)
return data
training_data = transform_all(training_data)
test_data = transform_all(test_data)
# Impute single missing 'Fare' value with median
training_data['Fare'] = training_data['Fare'].fillna(training_data['Fare'].median())
test_data['Fare'] = test_data['Fare'].fillna(test_data['Fare'].median())
all_data = [training_data, test_data]
combined = pd.concat(all_data)
def impute_ages(data):
drop_survived = data.drop(['Survived'], axis=1)
column_titles = list(drop_survived)
mice_results = fancyimpute.MICE().complete(np.array(drop_survived))
results = pd.DataFrame(mice_results, columns=column_titles)
results['Survived'] = list(data['Survived'])
return results
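# Explanatory note: MICE (Multiple Imputation by Chained Equations) infers the
# remaining missing values, chiefly the Age column, from the other features.
# Survived is dropped first so the target cannot leak into the imputation and
# is re-attached afterwards.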
combined = impute_ages(combined)
training_data = combined[:891]
test_data = combined[891:].drop('Survived', axis=1)
# transform age and fare data to have mean zero and variance 1.0
# it may only be appropriate to do a min max scaling here
scaler = preprocessing.StandardScaler()
select = 'Age Fare'.split()
training_data[select] = scaler.fit_transform(training_data[select])
# drop uninformative data and the target feature
droplist = 'Survived PassengerId'.split()
data = training_data.drop(droplist, axis=1)
# Define features and target values
X, y = data, training_data['Survived']
# generate the polynomial features
poly = preprocessing.PolynomialFeatures(2)
X = pd.DataFrame(poly.fit_transform(X))
# ----------------------------------
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
# Support Vector Machines
#
# Set the parameters by cross-validation
param_dist = {'C': scipy.stats.uniform(0.1, 1000), 'gamma': scipy.stats.uniform(.001, 1.0),
'kernel': ['rbf'], 'class_weight':['balanced', None]}
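# Note: scipy.stats.uniform(loc, scale) samples from [loc, loc + scale], so C
# is drawn from [0.1, 1000.1] and gamma from [0.001, 1.001] above.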
clf = SVC()
# run randomized search
n_iter_search = 100
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search, n_jobs=-1, cv=4)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidate"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.cv_results_) | gpl-3.0 |
doolanshire/Combat-Models | lanchester/lanchesterLinear.py | 1 | 1911 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 18 2017
@author: Alvaro Radigales
A simple Python implementation of the Lanchester Linear Law. Force
strength for each time pulse of the simulation is stored in a NumPy
array, and later plotted using MatPlotLib.
"""
import numpy
import matplotlib.pyplot as plot
from math import ceil
# The length of the time step will not alter the end result.
# Use only to determine the resolution of the graph.
timeStart = 0.0
timeEnd = 10.0
timeStep = 0.01
steps = int((timeEnd - timeStart) / timeStep)
# Initialise numpy arrays covering each step of the simulation.
blue = numpy.zeros(steps)
red = numpy.zeros(steps)
time = numpy.zeros(steps)
# To remove the frontage constraint, change the frontage variable to
# the smaller remaining force, both in its declaration and in the loop.
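# The loop below applies the linear-law attrition in discrete time: in each
# pulse only the troops on the engaged frontage inflict casualties, so
#   blue[i+1] = blue[i] - timeStep * frontage * redLethality
#   red[i+1]  = red[i]  - timeStep * frontage * blueLethality
# with frontage capped by both sides' remaining strength and losses floored at
# zero. (Explanatory sketch of the update only; it adds no model logic.)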
blue[0] = 42
red[0] = 30
frontage = 5
blueLethality = 1
redLethality = 1
time[0] = timeStart
for i in range(steps - 1):
frontage = min(frontage, ceil(red[i]), ceil(blue[i]))
blue[i+1] = max(0, blue[i] - timeStep * (frontage * redLethality))
red[i+1] = max(0, red[i] - timeStep * (frontage * blueLethality))
time[i+1] = time[i] + timeStep
# Remaining forces at the end of the simulation, for plot label purposes.
blueRemaining = int(blue[len(blue)-1])
redRemaining = int(red[len(red)-1])
# Plot code.
plot.figure()
plot.step(time, blue, '-b', where = 'post', label = 'Blue army')
plot.step(time, red, '-r', where = 'post', label = 'Red army')
plot.ylabel('Strength')
plot.xlabel('Time')
plot.legend()
plot.annotate(blueRemaining,
xy=(timeEnd, blue[len(blue)-1]),
xytext=(-15,10),
textcoords='offset points')
plot.annotate(redRemaining,
xy=(timeEnd, red[len(red)-1]),
xytext=(-15,10),
textcoords='offset points')
plot.show() | mit |
joshbohde/scikit-learn | examples/cluster/plot_dbscan.py | 1 | 2325 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print __doc__
import numpy as np
from scipy.spatial import distance
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4)
##############################################################################
# Compute similarities
D = distance.squareform(distance.pdist(X))
S = 1 - (D / np.max(D))
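# Explanatory note: D holds all pairwise Euclidean distances and S rescales
# them into similarities in [0, 1] (1 meaning identical points); this older
# example passes that similarity matrix directly to DBSCAN.fit below.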
##############################################################################
# Compute DBSCAN
db = DBSCAN().fit(S, eps=0.95, min_samples=10)
core_samples = db.core_sample_indices_
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print 'Estimated number of clusters: %d' % n_clusters_
print "Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)
print "Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)
print "V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)
##############################################################################
# Plot result
import pylab as pl
from itertools import cycle
pl.close('all')
pl.figure(1)
pl.clf()
# Black removed and is used for noise instead.
colors = cycle('bgrcmybgrcmybgrcmybgrcmy')
for k, col in zip(set(labels), colors):
if k == -1:
# Black used for noise.
col = 'k'
markersize = 6
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
x = X[index]
if index in core_samples and k != -1:
markersize = 14
else:
markersize = 6
pl.plot(x[0], x[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=markersize)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
| bsd-3-clause |
SuLab/scheduled-bots | scheduled_bots/drugs/mesh_pa/drug_bot.py | 1 | 5501 | # get drugs from mesh (only looking at drugs that have a pharm action)
# add mesh ID to wikidata by matching up with xrefs in mesh (unii, cas, or ec)
# drugs could be small molecules, protiens, biological entities
import pandas as pd
import time
pd.set_option("display.width", 220)
import re
from tqdm import tqdm
from scheduled_bots.drugs.mesh_pa.dumper import get_drug_pa
from wikidataintegrator import wdi_helpers, wdi_core, wdi_login, ref_handlers
from scheduled_bots.local import WDPASS, WDUSER
PROPS = {'MeSH ID': 'P486',
'MeSH Code': 'P672',
'subclass': 'P279',
'has role': 'P2868'}
ITEMS = {'Medical Subject Headings': 'Q199897'}
login = wdi_login.WDLogin(WDUSER, WDPASS)
#################
## get drugs by registry numbers. add mesh IDs
#################
def infer_type(s):
if len(s) == 10 and '-' not in s:
# https://www.wikidata.org/wiki/Property:P652
return 'unii', s
if re.fullmatch("[1-9]\d{1,6}-\d{2}-\d", s):
# https://www.wikidata.org/wiki/Property:P231
return 'cas', s
if s.startswith("EC "):
# https://www.wikidata.org/wiki/Property:P591
return "ec", s.replace("EC ", "")
else:
return None, s
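# Illustrative behaviour of infer_type (hypothetical registry numbers, added
# for documentation only):
#   infer_type("R78SDU5W42")  -> ("unii", "R78SDU5W42")   # 10 chars, no dash
#   infer_type("50-78-2")     -> ("cas", "50-78-2")       # CAS pattern
#   infer_type("EC 3.4.21.5") -> ("ec", "3.4.21.5")       # "EC " prefix stripped
#   infer_type("0")           -> (None, "0")              # unrecognised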
# get all drugs with a pharm action, plus the xrefs (from mesh, sparql)
df = get_drug_pa()
df = df.drop_duplicates("mesh")
mesh_qid = wdi_helpers.id_mapper("P486", return_as_set=True, prefer_exact_match=True)
mesh_qid = {k: list(v)[0] if len(v) == 1 else v for k, v in mesh_qid.items()}
unii_qid = wdi_helpers.id_mapper("P652", return_as_set=True, prefer_exact_match=True)
unii_qid = {k: list(v)[0] if len(v) == 1 else v for k, v in unii_qid.items()}
ec_qid = wdi_helpers.id_mapper("P591", return_as_set=True, prefer_exact_match=True)
ec_qid = {k: list(v)[0] if len(v) == 1 else v for k, v in ec_qid.items()}
cas_qid = wdi_helpers.id_mapper("P231", return_as_set=True, prefer_exact_match=True)
cas_qid = {k: list(v)[0] if len(v) == 1 else v for k, v in cas_qid.items()}
df['rn_type'] = df.rn.map(lambda x: infer_type(x)[0])
df['rn'] = df.rn.map(lambda x: infer_type(x)[1])
# see what was missed (nothing except '0')
print("Failed to recognize the following xrefs: {}".format(set(df[df.rn_type.isnull()].rn)))
df.dropna(subset=['rn_type'], inplace=True)
# the ec ones that end in a hyphen are just... wrong
df = df[~df.rn.str.endswith("-")]
df['mesh_qid'] = df.mesh.map(mesh_qid.get)
df['unii_qid'] = df.rn.map(unii_qid.get)
df['ec_qid'] = df.rn.map(ec_qid.get)
df['cas_qid'] = df.rn.map(cas_qid.get)
# looking at each xref type, which have multiple possible qids?
cols = ['unii_qid', 'ec_qid', 'cas_qid']
# need to look at these. they have multiple qids
for col in cols:
bad_df = df[df[col].map(lambda x: type(x) == set)]
print("The following df contains drugs where the xref has multiple possible {} QIDs: \n{}".format(col, bad_df))
df = df[~df.index.isin(bad_df.index)]
# are there cases where the xrefs disagree?
bad_df = df[df[cols].apply(lambda x: len(x.dropna())>1, axis=1)]
if not bad_df.empty:
print("the following disagree on xrefs: \n{}".format(bad_df))
df = df[~df.index.isin(bad_df.index)]
# take one of the xref qids
df['rn_qid'] = df[cols].apply(lambda x: ','.join(x.dropna()), axis=1)
df.rn_qid = df.rn_qid.replace("", pd.np.nan)
df = df.iloc[:, ~df.columns.isin(cols)]
# check that the mesh_qid and rn_qid are the same if both are found
s = df.dropna(subset=['mesh_qid', 'rn_qid'])
bad_df = s[s.mesh_qid != s.rn_qid]
print("these {} need to be checked (the mesh QID and xref QIDs don't match): \n{}".format(len(bad_df), bad_df))
print("the good news is that there are {} that agree!!!".format(len(s[s.mesh_qid == s.rn_qid])))
df = df[~df.index.isin(bad_df.index)]
# merge the mesh_qid and rn_qid
df['qid'] = df.mesh_qid.combine_first(df.rn_qid)
# and drop any that are still null
print("these {} have no matchable xrefs in wikidata: \n{}".format(len(df[df.qid.isnull()]), df[df.qid.isnull()]))
df.dropna(subset=['qid'], inplace=True)
# are there any that have matched to the same qid?
print("These have matched to the same QID based on xrefs: \n{}".format(df[df.duplicated("qid")]))
df = df[~df.duplicated("qid")]
print("left with {} mesh drugs".format(len(df)))
#################
## Add drug mesh IDs
#################
def make_ref(mesh_id):
refs = [[
wdi_core.WDItemID(value=ITEMS['Medical Subject Headings'], prop_nr='P248', is_reference=True), # stated in mesh
wdi_core.WDExternalID(value=mesh_id, prop_nr=PROPS['MeSH ID'], is_reference=True), # mesh id
wdi_core.WDTime(time=time.strftime('+%Y-%m-%dT00:00:00Z'), prop_nr='P813', is_reference=True) # retrieved
]]
return refs
records = df.to_dict("records")
records = [r for r in records if r['qid'] and not r['mesh_qid']]
for record in tqdm(records):
s = [wdi_core.WDString(record['mesh'], PROPS['MeSH ID'], references=make_ref(record['mesh']))]
item = wdi_core.WDItemEngine(wd_item_id=record['qid'], data=s,
ref_handler=ref_handlers.update_retrieved_if_new_multiple_refs,
fast_run_use_refs=True, fast_run=True, fast_run_base_filter={PROPS['MeSH ID']: ''})
try:
item.write(login)
except Exception as e:
print(e)
print(record)
# TODO:
#################
## Add drug mesh subclass links
#################
# example: alprazolam is a benzodiazepine
# https://meshb.nlm.nih.gov/record/ui?ui=D000525
# https://www.wikidata.org/wiki/Q319877#P279 | mit |
benoitsteiner/tensorflow-opencl | tensorflow/examples/learn/multiple_gpu.py | 24 | 4167 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: Dict of input `Tensor`.
labels: Label `Tensor`.
mode: One of `ModeKeys`.
Returns:
`EstimatorSpec`.
"""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
with tf.device('/device:GPU:1'):
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
with tf.device('/device:GPU:2'):
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Convert the labels to a one-hot tensor of shape (length of features, 3)
  # and with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(
loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
peastman/msmbuilder | msmbuilder/cluster/ndgrid.py | 12 | 4316 | # Author: Robert McGibbon <[email protected]>
# Contributors:
# Copyright (c) 2014, Stanford University
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, division
import numbers
import numpy as np
from sklearn.base import ClusterMixin, TransformerMixin
from . import MultiSequenceClusterMixin
from ..base import BaseEstimator
from ..utils import array2d
__all__ = ['NDGrid']
EPS = 1e-10
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class _NDGrid(ClusterMixin, TransformerMixin):
"""Discretize continuous data points onto an N-dimensional
grid.
This is in some sense the zero-th order approximation to
clustering. We throw down an n-dimensional cartesian grid
over the data points and then quantize each data point by
the index of the bin it's in.
Parameters
----------
n_bins_per_feature : int
        Number of bins along each feature (degree of freedom); the total
        number of bins will be ``n_bins_per_feature ** n_features``.
min : {float, array-like, None}, optional
Lower bin edge. If None (default), the min and max for each feature
will be fit during training.
max : {float, array-like, None}, optional
Upper bin edge. If None (default), the min and max for each feature
will be fit during training.
Attributes
----------
n_features : int
Number of features
n_bins : int
The total number of bins
grid : np.ndarray, shape=[n_features, n_bins_per_feature+1]
Bin edges
"""
def __init__(self, n_bins_per_feature=2, min=None, max=None):
self.n_bins_per_feature = n_bins_per_feature
self.min = min
self.max = max
# unknown until we have the number of features
self.n_features = None
self.n_bins = None
self.grid = None
def fit(self, X, y=None):
"""Fit the grid
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Data points
Returns
-------
self
"""
X = array2d(X)
self.n_features = X.shape[1]
self.n_bins = self.n_bins_per_feature ** self.n_features
if self.min is None:
min = np.min(X, axis=0)
elif isinstance(self.min, numbers.Number):
min = self.min * np.ones(self.n_features)
else:
min = np.asarray(self.min)
if not min.shape == (self.n_features,):
raise ValueError('min shape error')
if self.max is None:
max = np.max(X, axis=0)
elif isinstance(self.max, numbers.Number):
max = self.max * np.ones(self.n_features)
else:
max = np.asarray(self.max)
if not max.shape == (self.n_features,):
raise ValueError('max shape error')
self.grid = np.array(
[np.linspace(min[i] - EPS, max[i] + EPS, self.n_bins_per_feature + 1)
for i in range(self.n_features)])
return self
def predict(self, X):
"""Get the index of the grid cell containing each sample in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
New data
Returns
-------
y : array, shape = [n_samples,]
Index of the grid cell containing each sample
"""
if np.any(X < self.grid[:, 0]) or np.any(X > self.grid[:, -1]):
raise ValueError('data out of min/max bounds')
binassign = np.zeros((self.n_features, len(X)), dtype=int)
for i in range(self.n_features):
binassign[i] = np.digitize(X[:, i], self.grid[i]) - 1
labels = np.dot(self.n_bins_per_feature ** np.arange(self.n_features), binassign)
assert np.max(labels) < self.n_bins
return labels
def fit_predict(self, X, y=None):
return self.fit(X).predict(X)
class NDGrid(MultiSequenceClusterMixin, _NDGrid, BaseEstimator):
__doc__ = _NDGrid.__doc__
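# --- Editor's illustrative sketch (not part of the original module) ---
# Quick demonstration of the single-sequence _NDGrid defined above on random
# 2-D data; the sample count and bin count here are arbitrary choices.
def _demo_ndgrid(n_samples=1000, n_bins=5):
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(-1, 1, size=(n_samples, 2))
    grid = _NDGrid(n_bins_per_feature=n_bins).fit(X_demo)
    labels = grid.predict(X_demo)
    # With n_bins bins along each of the 2 features there are n_bins ** 2 cells.
    print('%d of %d possible grid cells are occupied'
          % (len(np.unique(labels)), grid.n_bins))
    return labels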
| lgpl-2.1 |
Fireblend/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
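# --- Editor's illustrative sketch (not part of the original test-suite) ---
# Minimal end-to-end use of KernelDensity mirroring what the tests above
# exercise: fit on samples, evaluate the log-density, and draw new samples
# (sampling is only implemented for the gaussian and tophat kernels).
def _demo_kernel_density():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 2)
    kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
    log_dens = kde.score_samples(X[:5])         # per-sample log p(x)
    new_points = kde.sample(3, random_state=0)  # shape (3, 2)
    return log_dens, new_points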
| bsd-3-clause |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_gd_5/task1_scene_classification.py | 6 | 38288 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
import argparse
import textwrap
import timeit
import skflow
from sklearn import mixture
from sklearn import preprocessing as pp
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
from src.dataset import *
from src.evaluation import *
from src.features import *
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
train_start = 0.0
train_end = 0.0
test_start = 0.0
test_end = 0.0
def main(argv):
numpy.random.seed(123456) # let's make randomization predictable
tot_start = timeit.default_timer()
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 1: Acoustic Scene Classification
Baseline system
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( [email protected] )
System description
                This is a baseline implementation for the DCASE 2016 challenge acoustic scene classification task.
Features: MFCC (static+delta+acceleration)
Classifier: GMM
'''))
# Setup argument handling
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
parameter_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.splitext(os.path.basename(__file__))[0] + '.yaml')
params = load_parameters(parameter_file)
params = process_parameters(params)
make_folders(params)
title("DCASE 2016::Acoustic Scene Classification / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction')
# Collect files in train sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
files = sorted(files)
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training')
train_start = timeit.default_timer()
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
classifier_params=params['classifier']['parameters'],
classifier_method=params['classifier']['method'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite']
)
train_end = timeit.default_timer()
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing')
test_start = timeit.default_timer()
do_system_testing(dataset=dataset,
feature_path=params['path']['features'],
result_path=params['path']['results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
test_end = timeit.default_timer()
foot()
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation')
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'])
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing with challenge data')
do_system_testing(dataset=challenge_dataset,
feature_path=params['path']['features'],
result_path=params['path']['challenge_results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at [" + params['path']['challenge_results'] + "]"
print " "
tot_end = timeit.default_timer()
print " "
print "Train Time : " + str(train_end - train_start)
print " "
print " "
print "Test Time : " + str(test_end - test_start)
print " "
print " "
print "Total Time : " + str(tot_end - tot_start)
print " "
final_result['train_time'] = train_end - train_start
final_result['test_time'] = test_end - test_start
final_result['tot_time'] = tot_end - tot_start
joblib.dump(final_result, 'result.pkl')
return 0
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
# Convert feature extraction window and hop sizes seconds to samples
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
# Paths
params['path']['data'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['data'])
params['path']['base'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['base'])
# Features
params['path']['features_'] = params['path']['features']
params['path']['features'] = os.path.join(params['path']['base'],
params['path']['features'],
params['features']['hash'])
# Feature normalizers
params['path']['feature_normalizers_'] = params['path']['feature_normalizers']
params['path']['feature_normalizers'] = os.path.join(params['path']['base'],
params['path']['feature_normalizers'],
params['features']['hash'])
# Models
params['path']['models_'] = params['path']['models']
params['path']['models'] = os.path.join(params['path']['base'],
params['path']['models'],
params['features']['hash'], params['classifier']['hash'])
# Results
params['path']['results_'] = params['path']['results']
params['path']['results'] = os.path.join(params['path']['base'],
params['path']['results'],
params['features']['hash'], params['classifier']['hash'])
return params
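# Editor's note (illustrative, inferred from the parameter accesses in this
# script; the actual values live in task1_scene_classification.yaml): the
# params dict is expected to contain at least these top-level sections --
#   'general'                (dataset names, overwrite flag)
#   'flow'                   (which pipeline stages to run)
#   'path'                   (data / base / features / models / results dirs)
#   'features'               (fs, win/hop lengths, mfcc settings, method)
#   'classifier'             (method: 'gmm' or 'dnn')
#   'classifier_parameters'  (per-method parameter blocks)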
def make_folders(params, parameter_filename='parameters.yaml'):
"""Create all needed folders, and saves parameters in yaml-file for easier manual browsing of data.
Parameters
----------
params : dict
parameters in dict
parameter_filename : str
filename to save parameters used to generate the folder name
Returns
-------
nothing
"""
# Check that target path exists, create if not
check_path(params['path']['features'])
check_path(params['path']['feature_normalizers'])
check_path(params['path']['models'])
check_path(params['path']['results'])
# Save parameters into folders to help manual browsing of files.
# Features
feature_parameter_filename = os.path.join(params['path']['features'], parameter_filename)
if not os.path.isfile(feature_parameter_filename):
save_parameters(feature_parameter_filename, params['features'])
# Feature normalizers
feature_normalizer_parameter_filename = os.path.join(params['path']['feature_normalizers'], parameter_filename)
if not os.path.isfile(feature_normalizer_parameter_filename):
save_parameters(feature_normalizer_parameter_filename, params['features'])
# Models
model_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(model_features_parameter_filename):
save_parameters(model_features_parameter_filename, params['features'])
model_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(model_models_parameter_filename):
save_parameters(model_models_parameter_filename, params['classifier'])
# Results
# Save parameters into folders to help manual browsing of files.
result_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(result_features_parameter_filename):
save_parameters(result_features_parameter_filename, params['features'])
result_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(result_models_parameter_filename):
save_parameters(result_models_parameter_filename, params['classifier'])
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
audio_filename = os.path.split(audio_file)[1]
return os.path.join(path, os.path.splitext(audio_filename)[0] + '.' + extension)
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
def get_model_filename(fold, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '.' + extension)
def get_result_filename(fold, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
result path
extension : str
file extension
        (Default value='txt')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '.' + extension)
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
# Check that target path exists, create if not
check_path(feature_path)
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio data
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True,
fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
if params['method'] == 'gd':
#feature_file_txt = get_feature_filename(audio_file=os.path.split(audio_filename)[1],
# path=feature_path,
# extension='txt')
feature_data = feature_extraction_gd(y=y,
fs=fs,
lpgd_params=params['gd'],
win_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
else:
# feature_data['feat'].shape is (1501, 60)
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds',
overwrite=False):
"""Feature normalization
Calculated normalization factors for each evaluation fold based on the training material available.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
# Check that target path exists, create if not
check_path(feature_normalizer_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Initialize statistics
file_count = len(dataset.train(fold))
normalizer = FeatureNormalizer()
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
else:
raise IOError("Feature file not found [%s]" % (item['file']))
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System training
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method, either 'gmm' or 'dnn'
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(model_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_model_file = get_model_filename(fold=fold, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
# Collect training examples
file_count = len(dataset.train(fold))
data = {}
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Features not found [%s]" % (item['file']))
# Scale features
feature_data = model_container['normalizer'].normalize(feature_data)
# Store features per class label
if item['scene_label'] not in data:
data[item['scene_label']] = feature_data
else:
data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
le = pp.LabelEncoder()
tot_data = {}
# Train models for each class
for label in data:
progress(title_text='Train models',
fold=fold,
note=label)
if classifier_method == 'gmm':
model_container['models'][label] = mixture.GMM(**classifier_params).fit(data[label])
elif classifier_method == 'dnn':
if 'x' not in tot_data:
tot_data['x'] = data[label]
tot_data['y'] = numpy.repeat(label, len(data[label]), axis=0)
else:
tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
            if classifier_method == 'dnn':
                # Only construct the DNN classifier when it is actually used;
                # building it unconditionally would fail for GMM parameter sets.
                clf = skflow.TensorFlowDNNClassifier(**classifier_params)
                tot_data['y'] = le.fit_transform(tot_data['y'])
                clf.fit(tot_data['x'], tot_data['y'])
                clf.save('dnn/dnnmodel1')
# Save models
save_data(current_model_file, model_container)
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System testing.
If extracted features are not found from disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method, either 'gmm' or 'dnn'
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(result_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_result_file = get_result_filename(fold=fold, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold))
for file_id, item in enumerate(dataset.test(fold)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True,
fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % (item['file']))
if feature_params['method'] == 'gd':
                        feature_data = feature_extraction_gd(y=y,
                                                             fs=fs,
                                                             lpgd_params=feature_params['gd'],
                                                             win_params=feature_params['mfcc'],
                                                             delta_params=feature_params['mfcc_delta'],
                                                             acceleration_params=feature_params['mfcc_acceleration'])
else:
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
# Do classification for the block
if classifier_method == 'gmm':
current_result = do_classification_gmm(feature_data, model_container)
current_class = current_result['class']
elif classifier_method == 'dnn':
current_result = do_classification_dnn(feature_data, model_container)
current_class = dataset.scene_labels[current_result['class_id']]
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Store the result
if classifier_method == 'gmm':
results.append((dataset.absolute_to_relative(item['file']),
current_class))
elif classifier_method == 'dnn':
logs_in_tuple = tuple(lo for lo in current_result['logls'])
results.append((dataset.absolute_to_relative(item['file']),
current_class) + logs_in_tuple)
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
def do_classification_dnn(feature_data, model_container):
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(15)
logls.fill(-numpy.inf)
model_clf = skflow.TensorFlowEstimator.restore('dnn/dnnmodel1')
logls = numpy.sum(numpy.log(model_clf.predict_proba(feature_data)), 0)
classification_result_id = numpy.argmax(logls)
return {'class_id': classification_result_id,
'logls': logls}
def do_classification_gmm(feature_data, model_container):
"""GMM classification for give feature matrix
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container
Returns
-------
result : str
classification result as scene label
"""
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(len(model_container['models']))
logls.fill(-numpy.inf)
for label_id, label in enumerate(model_container['models']):
logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
classification_result_id = numpy.argmax(logls)
return {'class': model_container['models'].keys()[classification_result_id],
'logls': logls}
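# --- Editor's illustrative sketch (not part of the original baseline) ---
# Shows, on synthetic 2-D "features", how do_classification_gmm() above
# accumulates frame-wise log-likelihoods per scene class. It reuses the old
# sklearn mixture.GMM API that this script already imports (replaced by
# GaussianMixture in later sklearn releases).
def _demo_gmm_classification():
    rng = numpy.random.RandomState(0)
    # two fake scene classes with well separated feature distributions
    train = {'office': rng.randn(200, 2),
             'home': rng.randn(200, 2) + 3.0}
    container = {'normalizer': None, 'models': {}}
    for label in train:
        container['models'][label] = mixture.GMM(n_components=2).fit(train[label])
    # a short test segment drawn from the 'home' distribution
    segment = rng.randn(50, 2) + 3.0
    result = do_classification_gmm(segment, container)
    print "predicted scene:", result['class']
    return result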
def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results_fold = []
tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
for fold in dataset.folds(mode=dataset_evaluation_mode):
dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results = []
result_filename = get_result_filename(fold=fold, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
# Rewrite the result file
if os.path.isfile(result_filename):
with open(result_filename+'2', 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
y_true = (dataset.file_meta(result_item[0])[0]['scene_label'],)
writer.writerow(y_true + tuple(result_item))
y_true = []
y_pred = []
for result in results:
y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
y_pred.append(result[1])
dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
results_fold.append(dcase2016_scene_metric_fold.results())
tot_cm += confusion_matrix(y_true, y_pred)
final_result['tot_cm'] = tot_cm
final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm)) / numpy.sum(tot_cm)
results = dcase2016_scene_metric.results()
final_result['result'] = results
print " File-wise evaluation, over %d folds" % dataset.fold_count
fold_labels = ''
separator = ' =====================+======+======+==========+ +'
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_labels += " {:8s} |".format('Fold' + str(fold))
separator += "==========+"
print " {:20s} | {:4s} : {:4s} | {:8s} | |".format('Scene label', 'Nref', 'Nsys', 'Accuracy') + fold_labels
print separator
for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['class_wise_accuracy'][label] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format(label,
results['class_wise_data'][label]['Nref'],
results['class_wise_data'][label]['Nsys'],
results['class_wise_accuracy'][
label] * 100) + fold_values
print separator
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['overall_accuracy'] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format('Overall accuracy',
results['Nref'],
results['Nsys'],
results['overall_accuracy'] * 100) + fold_values
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| mit |
Djabbz/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
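# --- Editor's illustrative sketch (not part of the original test-suite) ---
# Compact version of the sparse-vs-dense agreement the tests below verify:
# fitting the same ElasticNet on a scipy.sparse matrix and on its dense copy
# should give (nearly) identical coefficients.
def _demo_sparse_dense_agreement():
    X, y = make_sparse_data(n_samples=100, n_features=50, n_informative=5)
    enet_sparse = ElasticNet(alpha=0.1, l1_ratio=0.8, fit_intercept=False,
                             max_iter=1000, tol=1e-7).fit(X, y)
    enet_dense = ElasticNet(alpha=0.1, l1_ratio=0.8, fit_intercept=False,
                            max_iter=1000, tol=1e-7).fit(X.toarray(), y)
    assert_array_almost_equal(enet_sparse.coef_, enet_dense.coef_, 5)
    return enet_sparse.coef_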
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
snlpatel001213/algorithmia | DeepLearning/RBM/mnist.py | 1 | 4826 | import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
class RBM:
def __init__(self, learning_rate=0.1):
self.ministData = self.getMinistData()
self.numHidden = 10
self.numVisible = self.ministData.shape[1] # 64
self.learningRate = learning_rate
np.random.seed(123)
# Initialize a weight matrix, of dimensions (num_visible x num_hidden), using
# a Gaussian distribution with mean 0 and standard deviation 0.1.
self.weights = 0.1 * np.random.randn(self.numVisible, self.numHidden)
        # Bias vectors for the visible and hidden units (kept separate from the weight matrix).
self.visibleNodeBias = np.ones(self.numVisible, dtype=float)
self.hiddenNodeBias = np.ones(self.numHidden, dtype=float)
def activation_logistic(self, x):
return 1.0 / (1 + np.exp(-x))
def rmse(self, predictions, targets):
return np.sqrt(((predictions - targets) ** 2).mean())
def train(self, data):
"""
to train RBM as per Hinton's "A Practical Guide to Training Restricted Boltzmann Machines"
:param data:
:return: stores weights in RBM object
"""
for epoch in range(5000):
numberOfData = data.shape[0]
# forward phase
positiveHiddenProbability = self.activation_logistic(np.dot(data, self.weights) + self.hiddenNodeBias)
            # positiveHiddenProbability has shape (numberOfData, numHidden)
positiveHiddenStates = positiveHiddenProbability > np.random.rand(numberOfData, self.numHidden)
# reconstruction phase
# 1) Generating reconstructed data
reconstructedData = self.activation_logistic(
np.dot(positiveHiddenStates, self.weights.T) + self.visibleNodeBias)
# 2) Generating reconstructed output
reconstructedOutput = self.activation_logistic(
np.dot(reconstructedData, self.weights) + self.hiddenNodeBias)
# 3) calculating positive Association and negative Association
# 3.A) positive Association = <Vi * Hj> original
positiveAssociation = np.dot(data.T, positiveHiddenProbability)
# 3.B) negative Association = <Vi * Hj> reconstructed
negativeAssociation = np.dot(reconstructedData.T, reconstructedOutput)
# 4) weight change
changeInWeight = self.learningRate * ((positiveAssociation - negativeAssociation) / numberOfData)
self.weights += changeInWeight
            error = self.rmse(data, reconstructedData)
            print("epoch %d: reconstruction RMSE %s" % (epoch, error))
def getHidden(self, data):
"""
:param data:
:return:
"""
numberofSample = data.shape[0]
output = self.activation_logistic(np.dot(data, self.weights) + self.hiddenNodeBias)
outputStates = output > np.random.rand(numberofSample, self.numHidden)
print (outputStates)
def getVisible(self, data):
"""
:param data:
:return:
"""
numberofSample = data.shape[0]
reconstructedData = self.activation_logistic(np.dot(data, self.weights.T) + self.visibleNodeBias)
data = reconstructedData > np.random.rand(numberofSample, self.numVisible)
print data
def dreaming(self, numberOfTimes):
"""
:return:
"""
samples = np.ones((numberOfTimes, self.numVisible))
sample = samples[0]
for sampleno in range(numberOfTimes):
# forward pass
positiveHiddenProbability = self.activation_logistic(np.dot(sample, self.weights) + self.hiddenNodeBias)
positiveHiddenStates = positiveHiddenProbability > np.random.rand(1, self.numHidden)
# reconstruction phase
# 1) Generating reconstructed data
reconstructedData = self.activation_logistic(
np.dot(positiveHiddenStates, self.weights.T) + self.visibleNodeBias)
reconstructedStates = reconstructedData > np.random.rand(self.numVisible)
# print reconstructedStates
sample = reconstructedStates
samples[sampleno] = reconstructedStates
return samples
def getMinistData(self):
digits = datasets.load_digits()
flatten = []
for eachDigit in digits['images']:
temp = []
for eachrow in eachDigit:
temp.extend(eachrow)
flatten.append(temp)
return np.asarray(flatten,dtype='int8')
if __name__ == '__main__':
r = RBM(learning_rate=2.0)
training_data = r.ministData
r.train(training_data)
# # print r.weights
# # user = np.array([[0, 0, 0, 1, 1, 0]])
# # r.getHidden(user)
# # user = np.array([[1, 0]])
# # r.getVisible(user)
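    # --- Editor's illustrative sketch (not part of the original script) ---
    # Visualize a few "dreamed" samples produced by the Gibbs chain in
    # dreaming(); the 8x8 reshape matches the 64 visible units of the
    # sklearn digits data loaded by getMinistData().
    dreamed = r.dreaming(4)
    for i, sample in enumerate(dreamed):
        plt.subplot(1, len(dreamed), i + 1)
        plt.imshow(sample.reshape(8, 8), cmap='gray', interpolation='nearest')
        plt.axis('off')
    plt.show()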
| gpl-3.0 |
henrykironde/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
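# Editor's note (not part of the original example): scipy.misc.lena() was
# deprecated and later removed from SciPy; with a newer SciPy any grayscale
# image works here, e.g. lena = sp.misc.face(gray=True).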
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
cybernet14/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 68 | 1807 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
pandas-ml/pandas-ml | pandas_ml/skaccessors/test/test_svm.py | 2 | 2995 | #!/usr/bin/env python
import pytest
import numpy as np
import sklearn.datasets as datasets
import sklearn.svm as svm
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestSVM(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.svm.SVC, svm.SVC)
self.assertIs(df.svm.LinearSVC, svm.LinearSVC)
self.assertIs(df.svm.NuSVC, svm.NuSVC)
self.assertIs(df.svm.SVR, svm.SVR)
self.assertIs(df.svm.NuSVR, svm.NuSVR)
self.assertIs(df.svm.OneClassSVM, svm.OneClassSVM)
def test_l1_min_c(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.svm.l1_min_c()
expected = svm.l1_min_c(iris.data, iris.target)
self.assertAlmostEqual(result, expected)
@pytest.mark.parametrize("algo", ['SVR', 'NuSVR'])
def test_Regressions_curve(self, algo):
# http://scikit-learn.org/stable/auto_examples/plot_kernel_ridge_regression.html
X = 5 * np.random.rand(1000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(X.shape[0] // 5))
df = pdml.ModelFrame(data=X, target=y)
mod1 = getattr(df.svm, algo)()
mod2 = getattr(svm, algo)()
df.fit(mod1)
mod2.fit(X, y)
result = df.predict(mod1)
expected = mod2.predict(X)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
self.assertIsInstance(df.predicted, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(df.predicted.values, expected)
@pytest.mark.parametrize("algo", ['SVR', 'NuSVR'])
def test_Regressions_iris(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.svm, algo)()
mod2 = getattr(svm, algo)()
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
self.assertIsInstance(df.predicted, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(df.predicted.values, expected)
@pytest.mark.parametrize("algo", ['LinearSVC', 'NuSVC'])
def test_Classifications(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.svm, algo)(random_state=self.random_state)
mod2 = getattr(svm, algo)(random_state=self.random_state)
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
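# --- Editor's illustrative sketch (not part of the original test-suite) ---
# The accessor pattern exercised above, in one place: wrap a dataset in a
# ModelFrame, build an estimator through the df.svm accessor, then fit and
# predict through the frame itself.
def _demo_modelframe_svm():
    iris = datasets.load_iris()
    df = pdml.ModelFrame(iris)
    svc = df.svm.SVC()
    df.fit(svc)
    return df.predict(svc)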
| bsd-3-clause |
Eric89GXL/scikit-learn | sklearn/decomposition/__init__.py | 11 | 1307 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA, ProbabilisticPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProbabilisticPCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
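# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use of one of the estimators re-exported here: project data onto
# its first two principal components. (Normally you would simply do
# ``from sklearn.decomposition import PCA`` in your own code.)
def _pca_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    return PCA(n_components=2).fit_transform(X)   # shape (100, 2)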
| bsd-3-clause |
zutshi/S3CAMR | src/core/modelrefine.py | 1 | 54402 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools as it
#from collections import defaultdict
import collections
import numpy as np
from . import simulatesystem as simsys
from pwa import pwa
from pwa import simulator as pwa_sim
from pwa import relational as rel
#from . import random_testing as rt
from . import concretize as cnc
from bmc import bmc as BMC
from bmc.bmc_spec import InvarStatus
import modeling.affinemodel as AFM
from .cellmodels import Qxw, Qx
from . import cellmanager as CM
from graphs.graph import factory as graph_factory
from graphs.graph import class_factory as graph_class
from . import state
import modeling.cluster as CLST
import pwa.pwagraph as pwagraph
import pwa.relational as R
import settings
import utils as U
from utils import print
import err
from constraints import IntervalCons, top2ic, zero2ic
from IPython import embed
from globalopts import opts as gopts
# TODO: Remove this mess
if gopts.model_type == 'poly':
from pwa import analyzepathnl as azp
else:
from pwa import analyzepath as azp
#np.set_printoptions(suppress=True, precision=2)
# multiply num samples with the
MORE_FACTOR = 100
#TEST_FACTOR = 10
MAX_TRAIN = 500
#MAX_TEST = 200
KMAX = 1
KMIN = 1
KMAX_EXCEEDED = 0
SUCCESS = 1
TERMINAL = 2
PWASYSPROP = collections.namedtuple('pwa_sys_prop', 'pwa_model init_partitions final_partitions')
def abs_state2cell(abs_state, AA):
return CM.Cell(abs_state.plant_state.cell_id, AA.plant_abs.eps)
def ic2multicell(ic, eps):
cells = CM.ic2cell(ic, eps)
return cells
def ic2cell(ic, eps):
cells = CM.ic2cell(ic, eps)
# Due to path normalization the entire pi range gets added as an
# edge by S3CAM wherever paths are shorter than the max path
# length. Please fix this and switch on the assertion.
# FIXED
assert(len(cells) == 1)
return cells[0]
#return CM.Cell(CM.cell_from_concrete(pi, eps), eps)
def simulate_pwa(pwa_model, x_samples, N):
return [pwa_sim.simulate(pwa_model, x0, N) for x0 in x_samples]
def simulate(AA, sp, pwa_model, max_path_len, S0):
NUM_SIMS = 100
# sample only initial abstract state
x0_samples = (sp.sampler.sample_multiple(S0, AA, sp, NUM_SIMS)).x_array
#print(x0_samples)
# sample the entire given initial set
#X0 = sp.init_cons
#x0_samples = sample.sample_ival_constraints(X0, n=1000)
print('path length: {}'.format(max_path_len))
traces = [i for i in simulate_pwa(pwa_model, x0_samples, N=max_path_len)]
return traces
def sim_n_plot(sp, AA, prop, error_paths, depth, pwa_model):
# intial abs state set
S0 = {path[0] for path in error_paths}
s = (state for path in error_paths for state in path)
print('simulating using depth = {} ...'.format(depth))
pwa_traces = simulate(AA, sp, pwa_model, depth, S0)
print('done')
gopts.plotting.figure()
print('plotting...')
gopts.plotting.plot_abs_states(AA, prop, s)
gopts.plotting.plot_rect(sp.final_cons.rect())
gopts.plotting.plot_pwa_traces(pwa_traces)
#fig = BP.figure(title='S3CAMR')
#fig = plt.figure()
gopts.plotting.show()
# def get_qgraph(sp, AA, G, error_paths, pi_seqs):
# for path, pi_seq in zip(error_paths, pi_seqs):
# for (a1, a2), pi_ic in zip(U.pairwise(path), pi_seq):
# x1cell, x2cell = abs_state2cell(a1, AA), abs_state2cell(a2, AA)
# if AA.num_dims.pi:
# wcell = ic2cell(pi_ic, sp.pi_ref.eps)
# else:
# wcell = None
# q1, q2 = Qx(x1cell), Qx(x2cell)
# G.add_edge(q1, q2, pi=wcell)
# return G
class QGraph(graph_class(gopts.graph_lib)):
def __init__(self):
super(self.__class__, self).__init__()
self.init = set()
self.final = set()
#TODO: URGENT: The error path and pi_seq generated from S3CAM is not
# clear. Are we capturing multiple pi values between same states?
# Please review the code where the pi values are added to the edges
# and error paths and pi_seqs extracted.
# Subsumes get_qgraph
def get_qgraph_xw(sp, AA, error_paths, pi_seqs):
# for i, pi_seq in enumerate(pi_seqs):
# for j, pi in enumerate(pi_seq):
# try:
# ic2cell(pi, sp.pi_ref.eps)
# except AssertionError:
# print(i,j)
# print(pi)
# exit()
#G = graph_factory(gopts.graph_lib)
G = QGraph()
for path, pi_seq in zip(error_paths, pi_seqs):
# Normalize the (x, w) list
# e.g., for a 3 state length path, we have 3 abstract states
# and 2 pi cells (ic)
# a0 --pi01--> a1 --pi12--> a2
# Which means that the new flattened graph will have nodes
# like (a0, p01), (a1, p12), (a2, ?)
        # a2 cannot have None as its pi due to self loops being
        # modeled. As a pi value is not known for self loop dynamics,
# we assume all possible pi values, i.e., pi \in pi_cons and
# append it at the end of the pi_seq.
# Note, instead of directly appending it, we append None, and
# detect it below and replace accordingly. This is done in
# order to keep ic2cell() and ic2multicell() separate. This
# helps in catching bugs! Can be removed later to improve
# performance.
pi_seq.append(None)
# ###############################
# # Add final and initial states
# #TODO: Only works for AA.num_dims.pi = 0
# a0, af = path[0], path[-1]
# x0cell, xfcell = abs_state2cell(a0, AA), abs_state2cell(af, AA)
# #q0, qf = Qx(a0, x0cell), Qx(af, xfcell)
# q0, qf = Qx(x0cell), Qx(xfcell)
# G.init.add(q0)
# G.final.add(qf)
# ###############################
# TODO: Merge the two branches?
if AA.num_dims.pi:
# Fix this crap: have wcell on edge?
raise NotImplementedError
for (a1, a2), (pi1_ic, pi2_ic) in it.zip_longest(U.pairwise(path), U.pairwise(pi_seq)):
x1cell, x2cell = abs_state2cell(a1, AA), abs_state2cell(a2, AA)
w1cell = ic2cell(pi1_ic, sp.pi_ref.eps)
q1 = Qxw(a1, x1cell, w1cell)
if pi2_ic is None:
w2cells = ic2multicell(sp.pi_ref.i_cons, sp.pi_ref.eps)
q2s = [Qxw(a2, x2cell, w2cell) for w2cell in w2cells]
for q2 in q2s:
G.add_edge(q1, q2)
else:
w2cell = ic2cell(pi2_ic, sp.pi_ref.eps)
q2 = Qxw(a2, x2cell, w2cell)
G.add_edge(q1, q2)
else:
for (a1, a2) in U.pairwise(path):
x1cell, x2cell = abs_state2cell(a1, AA), abs_state2cell(a2, AA)
#q1, q2 = Qx(a1, x1cell), Qx(a2, x2cell)
q1, q2 = Qx(x1cell), Qx(x2cell)
G.add_edge(q1, q2)
return G
def error_graph2qgraph_xw(sp, AA, initial_state_set, final_state_set, error_graph):
G = QGraph()
# # Add all initial and final q
# #TODO: works only for AA.num_dims.pi = 0
# ################################
# for a in error_graph.nodes():
# if a in initial_state_set:
# xcell = abs_state2cell(a, AA)
# #q = Qx(a, xcell)
# q = Qx(xcell)
# G.init.add(q)
# elif a in final_state_set:
# xcell = abs_state2cell(a, AA)
# #q = Qx(a, xcell)
# q = Qx(xcell)
# G.final.add(q)
# else:
# pass
# ##############################
for edge in error_graph.all_edges():
a1, a2 = edge
x1cell, x2cell = abs_state2cell(a1, AA), abs_state2cell(a2, AA)
if AA.num_dims.pi:
pi1_ic = error_graph.edge_attrs(edge)['pi']
assert(pi1_ic is not None)
w1cell = ic2cell(pi1_ic, sp.pi_ref.eps)
            # This is a mess. Instead of being an edge, pi is actually
# in the node....fix it somehow
assert(False)
w2cell = ic2cell(pi2_ic, sp.pi_ref.eps)
q1, q2 = Qxw(x1cell, w1cell), Qxw(x2cell, w2cell)
else:
#q1, q2 = Qx(a1, x1cell), Qx(a2, x2cell)
q1, q2 = Qx(x1cell), Qx(x2cell)
G.add_edge(q1, q2)
return G
def get_pwa_system(sys, prop, sp, qgraph):
#pwa_sys_prop.pwa_model
pwa_sys_prop = build_pwa_model(sys, prop, qgraph, sp, 'dft')
# if settings.debug:
# qg.draw_graphviz()
# qg.draw_mplib()
draw_model(gopts.construct_path(sys.sys_name), pwa_sys_prop.pwa_model)
#max_path_len = max([len(path) for path in error_paths])
#print('max_path_len:', max_path_len)
# depth = max_path_len - 1, because path_len = num_states along
# path. This is 1 less than SAL depth and simulation length
#depth = max(int(np.ceil(AA.T/AA.delta_t)), max_path_len - 1)
depth = int(np.ceil(prop.T/sys.delta_t))
print('depth :', depth)
# if settings.debug_plot:
# # Need to define a new function to simulate inputs
# sim_n_plot(sp, AA, prop, error_paths, depth, pwa_model)
# TODO: The reason we need this is to encode the transitions which
# land to error states. This is required to discard bmc paths
# which do the below:
#
# p0 -m01-> p1 -m12-> p2
# Where p1->p2 is valid, but m12(x) \not-in p2(x)
# cell = p1 and p1(x) -->
# cell = p2 and x' = m12(x)
#
# Clearly, this does not enforce x' \in p2(x)
    # Hence, even if x' \in prop, it did not really reach there by
# following the rules of evolution of the pwa system.
# Specifically, ignored: x' \in p2(x). And hence, it used m12 to
# go outside p2(x), but the map m12 was meant specifically to go
# to p2
#
    # The real reason for this is that the current BMC encoding is
    # faulty. It encodes partitions, but instead, we should be
    # encoding transitions. That is, each location of the sal
    # transition system is actually a transition pi --mij--> pj
# Hence, we need to know the error locations, from where we add
# one more partition.
# older calculation of prop_cells
#prop_cells = {abs_state2cell(path[-1], AA) for path in error_paths}
#prop_cells = {abs_state2cell(s, AA) for s in (final_state_set)}
# if gopts.max_paths > 0:
# prop_cells = {abs_state2cell(path[-1], AA) for path in error_paths}
# init_cells = {abs_state2cell(path[0], AA) for path in error_paths}
# else:
# prop_cells = {abs_state2cell(s, AA) for s in (final_state_set)}
# init_cells = {abs_state2cell(s, AA) for s in (initial_state_set)}
assert(pwa_sys_prop.init_partitions)
assert(pwa_sys_prop.final_partitions)
# # partitions do not have a hash function. Hence, using a work
# # around to avoid duplications
# # Create a mapping from Q -> partitions
# Q_p_map = {sm.p.ID.xcell: sm.p for sm in pwa_model if sm.p.ID.xcell in prop_cells}
# # Q have a hash function
# prop_partitions = [sm.p for sm in pwa_model if sm.p.ID.xcell in prop_cells]
# init_partitions = {sm.p.ID.xcell: sm.p for sm in pwa_model if sm.p.ID.xcell in init_cells}.values()
return pwa_sys_prop, depth
# ignores the constraints on pnexts: p_future
def convert_pwarel2pwagraph(pwa_rel_model):
assert(isinstance(pwa_rel_model, R.PWARelational))
pwa_graph = pwagraph.PWAGraph()
for sm in pwa_rel_model:
assert(isinstance(sm, R.KPath))
assert(len(sm.pnexts) >= 1)
#TODO: why more than 1?
assert(len(sm.pnexts) == 1)
for pi in sm.pnexts:
pwa_graph.add_relation(sm.p, pi, sm.m)
return pwa_graph
def refine_dft_model_based(AA, errors, initial_state_set, final_state_set, sp, sys_sim, sys, prop):
# initialize Qxw class
if AA.num_dims.pi:
Qxw.init(sp.pi_ref.i_cons)
if gopts.max_paths > 0:
error_paths, pi_seqs = errors
qgraph = get_qgraph_xw(sp, AA, error_paths, pi_seqs)
else:
error_graph = errors
qgraph = error_graph2qgraph_xw(sp, AA, initial_state_set,
final_state_set, error_graph)
# make sure init and final are not empty
#qgraph = get_qgraph_xw(sp, AA, error_paths, pi_seqs)
#qgraph = error_graph2qgraph_xw(sp, AA, error_graph)
# Add init and final states to qgraph
qgraph.init = {q for q in qgraph if q.xcell.ival_constraints & prop.init_cons}
qgraph.final = {q for q in qgraph if q.xcell.ival_constraints & prop.final_cons}
# assert(qgraph.init == init)
# assert(qgraph.final == final)
assert(qgraph.init)
assert(qgraph.final)
pwa_sys_prop, depth = get_pwa_system(sys, prop, sp, qgraph)
# flush all plots: must block
gopts.plotting.show(block=True)
pwa_graph = convert_pwarel2pwagraph(pwa_sys_prop.pwa_model)
return check4CE(pwa_graph, depth,
pwa_sys_prop.init_partitions,
pwa_sys_prop.final_partitions,
sys.sys_name, 'dft', AA, sys, prop, sp)
def check4CE(pwa_model, depth, init_partitions, prop_partitions, sys_name, model_type, AA, sys, prop, sp):
# TODO: Why?
# Extend both init set and final set to include inputs if any
dummy_cons = top2ic(AA.num_dims.pi) # T <=> [-inf, inf]
final_cons2 = IntervalCons.concatenate(sp.final_cons, dummy_cons)
init_cons = (sp.init_cons if AA.num_dims.pi == 0
else IntervalCons.concatenate(
sp.init_cons,
sp.pi_ref.i_cons))
xs = ['x'+str(i) for i in range(AA.num_dims.x)]
ws = ['w'+str(i) for i in range(AA.num_dims.pi)]
# Order is important
vs = xs + ws
bmc = BMC.factory(
gopts.bmc_engine,
sys,
prop,
vs,
pwa_model, init_cons, final_cons2,
init_partitions,
prop_partitions,
gopts.construct_path,
'{}_{}'.format(sys_name, model_type),
model_type,
gopts.smt_engine)
traces = bmc.trace_generator(depth)
num_traces = 0
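    # Ask the BMC engine for abstract counterexample traces one by one and try
    # to concretize each on the actual system; stop at the first trace that is
    # successfully validated.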
for bmc_trace, pwa_trace in traces:
num_traces += 1
print('Unsafe behavior found...trying to concretize...')
if settings.debug:
print(bmc_trace)
print(pwa_trace)
result = verify_traces(AA, sys, prop, sp, bmc_trace, pwa_trace)
if result:
break
#result = True
if num_traces == 0:
print("The system has been determined to be 'Safe'")
result = False
return result
# def check4CE(pwa_model, depth, init_partitions, prop_partitions, sys_name, model_type, AA, sys, prop, sp):
# # TODO: Why?
# # Extend both init set and final set to include inputs if any
# dummy_cons = top2ic(AA.num_dims.pi) # T <=> [-inf, inf]
# final_cons2 = IntervalCons.concatenate(sp.final_cons, dummy_cons)
# init_cons = (sp.init_cons if AA.num_dims.pi == 0
# else IntervalCons.concatenate(
# sp.init_cons,
# sp.pi_ref.i_cons))
# xs = ['x'+str(i) for i in range(AA.num_dims.x)]
# ws = ['w'+str(i) for i in range(AA.num_dims.pi)]
# # Order is important
# vs = xs + ws
# bmc = BMC.factory(
# gopts.bmc_engine,
# sys,
# prop,
# vs,
# pwa_model, init_cons, final_cons2,
# init_partitions,
# prop_partitions,
# gopts.construct_path,
# '{}_{}'.format(sys_name, model_type),
# model_type)
# status = bmc.check(depth)
# if status == InvarStatus.Safe:
# print("The system has been determined to be 'Safe'")
# exit()
# elif status == InvarStatus.Unsafe:
# #bmc_trace = bmc.get_last_trace()
# #print(bmc_trace)
# #print(bmc_trace.to_array())
# bmc_trace = bmc.get_trace()
# pwa_trace = bmc.get_pwa_trace()
# #xw_array = bmc_trace.to_array()
# #pwa_trace = bmc.get_last_traces()
# while bmc_trace is not None:
# #pwa_trace = bmc.get_last_pwa_trace()
# if settings.debug:
# print(bmc_trace)
# print(pwa_trace)
# print('Unsafe behavior found...trying to concretize...')
# #verify_bmc_trace(AA, sys, prop, sp, bmc.trace, xs, ws)
# #xw_array = bmc_trace.to_array()
# verify_traces(AA, sys, prop, sp, bmc_trace, pwa_trace)
# bmc.gen_new_disc_trace()
# #bmc_trace = bmc.get_last_trace()
# #xw_array, pwa_trace = bmc.get_last_traces()
# bmc_trace = bmc.get_trace()
# pwa_trace = bmc.get_pwa_trace()
# #U.pause()
# elif status == InvarStatus.Unknown:
# print('Unknown...exiting')
# exit()
# else:
# raise err.Fatal('Internal')
def verify_traces(AA, sys, prop, sp, bmc_trace, pwa_trace):
xw_array = bmc_trace.to_array()
x_array, w_array = np.split(xw_array, [AA.num_dims.x], axis=1)
pi_seq = w_array
res = verify_bmc_trace(AA, sys, prop, sp, x_array, pi_seq)
res = verify_pwa_trace(AA, sys, prop, sp, x_array, pi_seq, pwa_trace)
return res
def verify_bmc_trace(AA, sys, prop, sp, x_array, pi_seq):
"""Get multiple traces and send them for random testing
"""
#init_assignments = trace[0].assignments
#x0_array = np.array([init_assignments[x] for x in xs])
# Trace consists of transitions, but we want to interpret it as
# locations (abs_states + wi). Hence, subtract 1 from trace.
# TODO: fix inputs!!
#pi_seq = [[step.assignments[w] for w in ws] for step in bmc_trace[:-1]]
gopts.plotting.new_session()
res = cnc.concretize_bmc_trace(sys, prop, AA, sp, x_array, pi_seq)
return res
def verify_pwa_trace(AA, sys, prop, sp, x_array, pi_seq, pwa_trace):
gopts.plotting.new_session()
init_cons_subset = azp.overapprox_x0(AA.num_dims, prop, pwa_trace)
if init_cons_subset is None:
raise err.Fatal('BMC (feasible) and LP (infeasible?) solution do not agree!')
return cnc.concretize_init_cons_subset(sys, prop, AA, sp, x_array, pi_seq, init_cons_subset)
def get_abstract_path(AA, x_array):
# old function, not sure about its current health: marking it non
# implemented
raise NotImplementedError
abs_path = []
t, d, pvt, ci, s, pi, u = [0]*7
for x in x_array:
concrete_state = state.State(t, x, d, pvt, ci, s, pi, u)
abs_state = AA.get_abs_state_from_concrete_state(concrete_state)
abs_path.append(abs_state)
return abs_path
def refine_dmt_model_based(AA, error_paths, pi_seq_list, sp, sys_sim, bmc_engine):
"""refine using discrete time models
Parameters
----------
A :
error_paths :
pi_seq_list :
sp :
sys_sim :
Returns
-------
Notes
------
does not handle pi_seq_list yet
"""
raise NotImplementedError
def build_pwa_model(sys, prop, qgraph, sp, model_type):
"""build_pwa_model
Builds both dft and rel models
Parameters
----------
abs_objs : Either abs_states (for dft models) or relations
[tuple(abs_state_src, abs_state_target)] for rel model
sp : system params
tol : modeling error tolerance
include_err : include error in determining next state
x' = x +- errror
"""
tol = gopts.max_model_error
include_err = gopts.model_err
# number of training samples
#TODO : should be min and not max!
#ntrain = min(gopts.regression_sim_samples * MORE_FACTOR, MAX_TRAIN)
K = 30
ntrain = K*(sys.num_dims.x+1)
# number of test samples
#ntest = min(ntrain * TEST_FACTOR, MAX_TEST)
dt = sys.delta_t
step_sim = simsys.get_step_simulator(sp.controller_sim, sp.plant_sim, dt)
#abs_state_models = {}
pwa_model = rel.PWARelational()
init_partitions = set()
final_partitions = set()
init_not_added = set()
final_not_added = set()
#q2pid = collections.defaultdict(set)
    # for every vertex (abs_state) in the graph
for q in qgraph:
if settings.debug:
print('modeling: {}'.format(q))
sentinel = 0
for sub_model in q_affine_models(prop, ntrain, step_sim, tol, include_err, qgraph, q):
sentinel = 1
assert(sub_model is not None)
# sub_model.pnexts[0] = sub_model.p.ID to enforce self loops
print(U.colorize('{} -> {}, e%:{}, status: {}, e: {}, A:{}, b:{}'.format(
sub_model.p.ID,
[p.ID for p in sub_model.pnexts],
np.trunc(sub_model.max_error_pc),
sub_model_status(sub_model),
sub_model.m.error,
#str(sub_model.m.A).replace('\n', ''),
#sub_model.m.b
None,
None)))
pwa_model.add(sub_model)
#q2pid[q].add(sub_model.p.pID)
#abs_state_models[abs_state] = sub_model
# If the loop never ran, no submodel was found. Do not add q
# to init/final states
if sentinel == 1:
# Even if a state gets split, its recorded
if q in qgraph.init:
#init_partitions.add(ModelPartition(*q.poly(), part_id=q))
init_partitions.add(ModelPartition(*q.poly(), qi=q))
#init_partitions.update(q2pid[q])
# TODO: If we split a final cell to increase precision for
# the transition to concrete error_states and break the
# terminal self-loop, both will get recorded and get
# weird.
if q in qgraph.final:
#final_partitions.add(ModelPartition(*q.poly(), part_id=q))
final_partitions.add(ModelPartition(*q.poly(), qi=q))
#final_partitions.update(q2pid[q])
else:
if q in qgraph.init:
init_not_added.add(q)
if q in qgraph.final:
final_not_added.add(q)
    #TODO: REMOVE, added only because bball gives an error. Exact
#cause of error is not confirmed. It happens mostly because none of
#the final states have a self loop.
for q in qgraph.final:
final_partitions.add(ModelPartition(*q.poly(), qi=q))
pwa_sys_prop = PWASYSPROP(pwa_model, init_partitions, final_partitions)
return pwa_sys_prop
# TODO: fix this mess and move it to pwa models
def sub_model_status(s, status2str={KMAX_EXCEEDED: 'kmax exceeded', SUCCESS: 'success', TERMINAL: 'terminal'}):
return status2str[s.status]
def draw_model(sys_name, pwa_model):
"""Adds a 'label' attribute to the edges. This is useful for
graphs rendered using graphviz, as it annotates the edges with the
value of those attributes. Currently, the label's value is the
error between a learned relation between two nodes."""
# # update edge attribute by adding it again
# def label_edge_with_error(e, error):
# e_attrs = qgraph.G[e[0]][e[1]]
# e_attrs['label'] = np.round(error, 2)
# qgraph.G.add_edge(*e, attr_dict=e_attrs)
# # keep on looping through the submodels till the right one is
# # found
# for e in qgraph.G.edges():
# q1 = e[0]
# q2 = e[1]
# for sub_model in pwa_model:
# # TODO: builds a list every time!
# if sub_model.p.ID == q1 and any(q2 == pn.ID for pn in sub_model.pnexts):
# label_edge_with_error(e, sub_model.max_error_pc)
# break
G = graph_factory(gopts.graph_lib)
for sub_model in pwa_model:
for p_ in sub_model.pnexts:
#e_attr = {'label': np.round(sub_model.max_error_pc, 2)}
error = np.trunc(sub_model.max_error_pc)
color = 'red' if sub_model.status == KMAX_EXCEEDED else 'black'
G.add_edge(sub_model.p.ID, p_.ID, label=error, color=color)
G.node_attrs(sub_model.p.ID)['label'] = sub_model.p.ID
G.node_attrs(sub_model.p.ID)['tooltip'] = sub_model.p.ID.ival_constraints
if settings.debug:
G.draw_graphviz(sys_name)
#G.draw_mplib(sys_name)
# TODO: it is a superset of build_pwa_dft_model
# and should replace it
def build_pwa_dt_model(AA, abs_states, sp, sys_sim):
"""build_pwa_dt_model
Parameters
----------
AA :
AA is
abs_states :
abs_states is
sp :
sp is
sys_sim :
sys_sim is
Returns
-------
Notes
------
Builds a model with time as a discrete variable.
i.e., models the behaviors resulting from several chosen time
steps and not only the one specified in .tst as delta_t.
"""
dt_steps = [0.01, 0.1, AA.plant_abs.delta_t]
err.warn('using time steps: {}'.format(dt_steps))
step_sims = [simsys.get_step_simulator(sp.controller_sim, sp.plant_sim, dt)
for dt in dt_steps]
pwa_models = {}
for dt, step_sim in zip(dt_steps, step_sims):
pwa_model = pwa.PWA()
for abs_state in abs_states:
sub_model = affine_model(abs_state, AA, sp, step_sim)
pwa_model.add(sub_model)
pwa_models[dt] = pwa_model
return pwa_models
def model(tol, X, Y):
try:
rm = AFM.Model(X, Y)
except AFM.UdetError:
raise AFM.UdetError
#return []
e_pc = rm.max_error_pc(X, Y)
if settings.debug:
err.imp('error%: {}'.format(e_pc))
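    # Dimensions whose error % exceeds the tolerance; if any exist the model
    # is flagged with status KMAX_EXCEEDED instead of SUCCESS.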
error_dims = np.arange(len(e_pc))[np.where(e_pc >= tol)]
error_exceeds_tol = len(error_dims) > 0
refine = error_exceeds_tol
#TODO
status = SUCCESS if not refine else KMAX_EXCEEDED
return [(rm, e_pc, status)]
def mdl_1relational(prop, tol, step_sim, qgraph, q, X, Y):
assert(X.shape[1] == q.dim)
assert(Y.shape[1] == q.dim)
assert(X.shape[0] == Y.shape[0])
ms = []
# prevent repetition in case a self loop exists
neighbors_including_self = set(qgraph.neighbors(q))
neighbors_including_self.add(q)
for qi in neighbors_including_self:
print('modeling: Q({}) -> Q\'({})'.format(q, qi))
print(q.ival_constraints, qi.ival_constraints)
if settings.debug:
print('checking qi: ', qi)
sat = qi.sat(Y)
if any(sat):
rm_qseq = model(tol, X[sat], Y[sat])
l = [(rm_, (q, qi), e_pc_, status_) for rm_, e_pc_, status_ in rm_qseq]
ms.extend(l)
else:
if(qi == q):
#TODO: problems with final states empty
#ms.append((rm_, (q, qi), e_pc_, status_))
# no self loop observed
if settings.debug:
print('no self loop found')
else:
err.warn('out of samples...Giving up on the edge!')
# TODO: this will happen when the last location fails? confirm
if not ms:
# This means, all samples which landed are in a cell which
# was never initially explored by S3CAM. Can happen, but
# possibility is very very low.
pass
#raise err.Fatal('Very low prob. of happening. Check code')
# else:
# status = SUCCESS
# if settings.debug:
# print('error is under control...')
# ms = [(rm, [], e_pc, status)]
return ms
def mdl_old(AA, prop, tol, step_sim, qgraph, q, XY, Y_, k, kmin, kmax):
X, Y = XY
assert(X.shape[1] == q.dim)
assert(Y_.shape[1] == q.dim)
assert(X.shape[0] == Y.shape[0] == Y_.shape[0])
#print(U.colorize('# samples = {}'.format(X.shape[0])))
if k >= kmin:
rm = AFM.Model(X, Y)
e_pc = rm.max_error_pc(X, Y)
if settings.debug:
err.imp('error%: {}'.format(e_pc))
error_dims = np.arange(len(e_pc))[np.where(e_pc >= tol)]
error_exceeds_tol = len(error_dims) > 0
refine = error_exceeds_tol
#err.warn('e%:{}, |e%|:{}'.format(e_pc, np.linalg.norm(e_pc, 2)))
if settings.debug_plot:
rm.plot(X, Y, tol, 'q:{}, err:{}'.format(q, e_pc))
gopts.plotting.show()
else:
refine = True
if refine:
ms = []
if settings.debug:
print('error exceeds...')
if k >= kmax:
assert(k == kmax)
status = KMAX_EXCEEDED
if settings.debug:
err.warn('max depth exceeded but the error > tol. Giving up!')
ms = [(rm, [], e_pc, status)]
else:
# first check for existance of a self loop
#if any(q.sat(Y)):
#err.imp('self loop exists')
for qi in it.chain([q], qgraph.neighbors(q)):
if settings.debug:
print('checking qi: ', qi)
Y__ = qi.sim(step_sim, Y_)
sat = qi.sat(Y__)
# TODO: If we are out of samples, we can't do much. Need to
# handle this situation better? Not sure? Request for more
# samples? Give up?
from matplotlib import pyplot as plt
if settings.debug and settings.plot:
#ax = plt.gca()
#ax.set_color_cycle(['b'])
#ax.set_ylim([-10, 10])
#ax.set_xlim([-2, 2])
gopts.plotting.acquire_global_fig()
gopts.plotting.single_color('b')
gopts.plotting.set_range((-2, 2), (-7, 7))
# TODO: pull out abstract state from concrete
                # state Y_. q.a is no longer being maintained
                # It can be maintained, but it would then be an
# abstract state w/o a valid abstraction
gopts.plotting.plot_abs_states(AA, prop, [q.a])
if any(sat):
gopts.plotting.plot(Y__[sat, 0], Y__[sat, 1], '*')
gopts.plotting.plot(Y_[sat, 0], Y_[sat, 1], '.')
gopts.plotting.plot_abs_states(AA, prop, [qi.a])
if any(sat):
rm_qseq = mdl_old(AA, prop, tol, step_sim, qgraph, qi, (X[sat], Y[sat]), Y__[sat], k+1, kmin, kmax)
l = [(rm_, [qi]+qseq_, e_pc_, status_)
for rm_, qseq_, e_pc_, status_ in rm_qseq]
ms.extend(l)
else:
if(qi == q):
pass # no self loop observed
else:
err.warn('out of samples...Giving up on the edge!')
#############################################
#############################################
# Delete unused traj seg: assumes 1-relational modeling
if settings.paper_plot:
ax = plt.gca()
remove = set()
for l in ax.lines:
xy = l.get_xydata()
if np.all(q.sat(xy[0:1, :])):
if not any([np.all(qi.sat(xy[-1:, :])) for qi in it.chain([q], qgraph.neighbors(q))]):
remove.add(l)
for l in remove:
ax.lines.remove(l)
##############################################
##############################################
# TODO: this will happen when the last location fails? confirm
if not ms:
# This means, all samples which landed are in a cell which
# was never initially explored by S3CAM. Can happen, but
# possibility is very very low.
raise err.Fatal('Very low prob. of happening. Check code')
else:
status = SUCCESS
if settings.debug:
print('error is under control...')
ms = [(rm, [], e_pc, status)]
return ms
def dummy_sub_model(q):
# stationary dynamics
A, b = np.eye(q.dim), np.zeros(q.dim)
# no error
e = zero2ic(q.dim)
dmap = rel.DiscreteAffineMap((A, b), e)
C, d = q.ival_constraints.poly()
p = ModelPartition(C, d, q)
future_partitions = []
# self loop
pnexts = [p]
sub_model = rel.KPath(dmap, p, pnexts, future_partitions)
sub_model.max_error_pc = np.zeros(q.dim)
sub_model.status = TERMINAL
return sub_model
# models can be split
def q_affine_models(prop, ntrain, step_sim, tol, include_err, qgraph, q):
"""Find affine models for a given Q
Parameters
----------
cell : cell
step_sim : 1 time step (delta_t) simulator
tol : each abs state is split further into num_splits cells
        in order to meet: modeling error < tol (modulo ntests samples)
Returns
-------
pwa.SubModel()
Notes
------
"""
try_again = True
ntries = 1
#MAX_TRIES = 2
MAX_TRIES = 0
Xts, Yts = gopts.trajstore.get_traj(q.xcell.cell)
Xl, Yl = [Xts], [Yts]
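    # Start from any trajectory samples already stored for this cell and keep
    # augmenting them with freshly simulated relations on every retry below.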
while try_again:
last_node = not qgraph.edges(q)
Xi, Yi = q.get_rels(prop, step_sim, ntrain)
assert(not Xi.size == 0 or Yi.size == 0)
assert(not Yi.size == 0 or Xi.size == 0)
Xl.append(Xi)
Yl.append(Yi)
X, Y = np.concatenate(Xl), np.concatenate(Yl)
if X.size == 0:
# make sure it is the last node: had no edges
assert(last_node)
# The cell is completely inside the property
# If not, it means that the volume of Cell - prop is very
            # small and a sample wasn't found in there.
assert(prop.final_cons.contains(q.ival_constraints))
return [dummy_sub_model(q)]
try:
regression_models = mdl_1relational(prop, tol, step_sim, qgraph, q, X, Y)
# we are done!
if regression_models:
try_again = False
# else try again
else:
err.warn('no model found')
if ntries > MAX_TRIES:
if last_node:
err.warn('giving up on last node')
else:
err.warn('can happen rarely...')
try_again = False
except AFM.UdetError:
try_again = True
#pass
if try_again:
print('trying again')
# double the number of samples and try again
ntrain *= 2
# repeat!
ntries += 1
# try again on failure, and settle with non relational models
#assert(regression_models)
sub_models = []
for rm, (qi, qj), e_pc, status in regression_models:
e = qi.errorQ(include_err, rm)
if gopts.model_type == 'poly':
dmap = rel.DiscretePolyMap(qi.modelQ(rm), e)
else:
dmap = rel.DiscreteAffineMap(qi.modelQ(rm), e)
#C, d = qi.ival_constraints.poly()
#partition_cluster = CLST.Q(qi)
#partition_cluster = CLST.Box(rm.X)
C, d = CLST.factory()(q, rm.X)
p = ModelPartition(C, d, qi)
future_partitions = []
pnexts = []
# Relational modeling is available. Add the edge which was
# used to model this transition.
# Add the immediate next reachable state
C, d = qj.ival_constraints.poly()
pnexts.append(ModelPartition(C, d, qj))
sub_model = rel.KPath(dmap, p, pnexts, future_partitions)
sub_model.max_error_pc = e_pc
sub_model.status = status
sub_models.append(sub_model)
return sub_models
class ModelPartition(pwa.Partition):
"""Partition: attaches additional information to pwa partition"""
uid_ = 0
def __init__(self, C, d, qi):
super(self.__class__, self).__init__(C, d, qi)
#self.pID = self.__class__.uid()
@classmethod
def uid(cls):
"""Generates a unique id for a partition"""
cls.uid_ += 1
return cls.uid_
def build_pwa_ct_model(AA, abs_states, sp, sys_sim):
"""Build a time continuous pwa model
Parameters
----------
AA :
AA is
abs_states :
abs_states is
sp :
sp is
sys_sim :
sys_sim is
Returns
-------
Notes
------
Builds a model with time as a bounded continuous variable.
i.e., models the behaviors resulting from several time steps and
not only the one chosen one.
"""
raise NotImplementedError
################################################
# ############# CEMETERY #######################
################################################
# #AA.plant_abs.get_abs_state_cell(abs_state.plant_state),
# def cell_affine_models(q, step_sim, ntrain, ntest, tol, include_err):
# """cell_affine_models
# Parameters
# ----------
# cell : cell
# step_sim : 1 time step (delta_t) simulator
# tol : each abs state is split further into num_splits cells
#     in order to meet: modeling error < tol (modulo ntests samples)
# Returns
# -------
# pwa.SubModel()
# Notes
# ------
# """
# # XXX: Generate different samples for each time step or reuse?
# # Not clear!
# sub_models = []
# X, Y = q.getxy_ignoramous(ntrain, step_sim)
# rm = RegressionModel(X, Y)
# X, Y = q.getxy_ignoramous(ntest, step_sim)
# e_pc = rm.max_error_pc(X, Y) # error %
# if settings.debug:
# print('error%:', e_pc)
# #error = np.linalg.norm(e_pc, 2)
# # indices where error exceeds tol
# error_dims = np.arange(len(e_pc))[np.where(e_pc >= tol)]
# if len(error_dims) > 0:
# err.warn('splitting on e%:{}, |e%|:{}'.format(
# e_pc, np.linalg.norm(e_pc, 2)))
# for split_cell in q.split(axes=error_dims):
# sub_models_ = cell_affine_models(
# split_cell, step_sim, ntrain, ntest, tol, include_err)
# sub_models.extend(sub_models_)
# return sub_models
# else:
# #print('error%:', rm.max_error_pc(X, Y))
# A, b, C, d = q.modelQ(rm)
# e = q.error(include_err, X, Y, rm)
# dmap = pwa.DiscreteAffineMap(A, b, e)
# part = pwa.Partition(C, d, q)
# sub_model = pwa.SubModel(part, dmap)
# if __debug__:
# print('----------------Finalized------------------')
# return [sub_model]
# def getxy_rel_ignoramous_force_min_samples(cell1, cell2, force, N, sim, t0=0):
# """getxy_rel_ignoramous
# """
# xl = []
# yl = []
# sat_count = 0
# if __debug__:
# obs_cells = set()
# while True:
# x_array, y_array = getxy_ignoramous(cell1, N, sim, t0=0)
# if __debug__:
# for i in y_array:
# obs_cells.add(CM.cell_from_concrete(i, cell1.eps))
# print('reachable cells:', obs_cells)
# # satisfying indexes
# sat_array = cell2.ival_constraints.sat(y_array)
# sat_count += np.sum(sat_array)
# xl.append(x_array[sat_array])
# yl.append(y_array[sat_array])
# # If no sample is found and force is True, must keep sampling till
# # satisfying samples are found
# if (sat_count >= MIN_TRAIN) or (not force):
# break
# if __debug__:
# print('re-sampling, count:', sat_count)
# print('found samples: ', sat_count)
# return np.vstack(xl), np.vstack(yl)
# #AA.plant_abs.get_abs_state_cell(abs_state.plant_state),
# def cell_rel_affine_models(cell1, cell2, force, step_sim, ntrain, ntest, tol, include_err):
# """cell_affine_models
# Parameters
# ----------
# cell1 : source cell
# cell2 : target cell
# step_sim : 1 time step (delta_t) simulator
# tol : each abs state is split further into num_splits cells
#     in order to meet: modeling error < tol (modulo ntests samples)
# Returns
# -------
# pwa.SubModel()
# Notes
# ------
# """
# # XXX: Generate different samples for each time step or reuse?
# # Not clear!
# sub_models = []
# X, Y = getxy_rel_ignoramous(cell1, cell2, force, ntrain, step_sim)
# # No samples found => no model
# training_samples_found = len(X) != 0
# if not training_samples_found:
# return [None]
# rm = RegressionModel(X, Y)
# X, Y = getxy_rel_ignoramous(cell1, cell2, True, ntest, step_sim)
# testing_samples_found = len(X) != 0
# # If valid samples are found, compute e_pc (error %) and dims
# # where error % >= given tol
# if testing_samples_found:
# e_pc = rm.max_error_pc(X, Y)
# error_dims = np.arange(len(e_pc))[np.where(e_pc >= tol)]
# # Otherwise, forget it!
# else:
# e_pc = None
# error_dims = []
# if __debug__:
# print('error%:', e_pc)
# if len(error_dims) > 0:
# err.warn('splitting on e%:{}, |e%|:{}'.format(
# e_pc, np.linalg.norm(e_pc, 2)))
# for split_cell1 in cell1.split(axes=error_dims):
# sub_models_ = cell_rel_affine_models(
# split_cell1, cell2, False, step_sim, ntrain, ntest, tol, include_err)
# sub_models.extend(sub_models_)
# return sub_models
# else:
# A, b = rm.A, rm.b
# C1, d1 = cell1.ival_constraints.poly()
# C2, d2 = cell2.ival_constraints.poly()
# e = rm.error(X, Y) if (include_err and testing_samples_found) else None
# dmap = rel.DiscreteAffineMap(A, b, e)
# part1 = rel.Partition(C1, d1, cell1)
# part2 = rel.Partition(C2, d2, cell2)
# sub_model = rel.Relation(part1, part2, dmap)
# if __debug__:
# print('----------------Finalized------------------')
# return [sub_model]
# def refine_rel_model_based(
# AA, error_paths, pi_seq_list, sp, sys_sim, sys, prop):
# '''does not handle pi_seq_list yet'''
# # abs_state relations: maps an abs_state to other abs_states
# # reachable in one time step
# abs_relations = defaultdict(set)
# for path in error_paths:
# # abs_state_1 -> abs_state_2
# for a1, a2 in U.pairwise(path):
# abs_relations[a1].add(a2)
# flat_relations = []
# for abs_state, rch_states in abs_relations.iteritems():
# flat_relation = list(itertools.product([abs_state], rch_states))
# flat_relations.extend(flat_relation)
# pwa_model = build_pwa_model(
# AA, flat_relations, sp, gopts.max_model_error,
# gopts.model_err, 'rel')
# if __debug__:
# sim_n_plot(error_paths, pwa_model, AA, sp)
# check4CE(pwa_model, error_paths, sys.sys_name, 'rel', AA, sys, prop, sp, gopts.bmc_engine)
# def q_affine_models(ntrain, ntest, step_sim, tol, include_err, qgraph, q):
# """cell_affine_models
# Parameters
# ----------
# cell : cell
# step_sim : 1 time step (delta_t) simulator
# tol : each abs state is split further into num_splits cells
#     in order to meet: modeling error < tol (modulo ntests samples)
# Returns
# -------
# pwa.SubModel()
# Notes
# ------
# """
# # XXX: Generate different samples for each time step or reuse?
# # Not clear!
# sub_models = []
# X, Y = q.getxy_ignoramous(ntrain, step_sim, qgraph)
# rm = RegressionModel(X, Y)
# X, Y = q.getxy_ignoramous(ntest, step_sim)
# e_pc = rm.max_error_pc(X, Y) # error %
# if __debug__:
# print('error%:', e_pc)
# #error = np.linalg.norm(e_pc, 2)
# # error exceeds tol in error_dims
# error_dims = np.arange(len(e_pc))[np.where(e_pc >= tol)]
# if len(error_dims) > 0:
# err.warn('splitting on e%:{}, |e%|:{}'.format(
# e_pc, np.linalg.norm(e_pc, 2)))
# for split_q in q.split(axes=error_dims):
# sub_models_ = q_affine_models(
# ntrain, ntest,
# split_q, step_sim, tol, include_err)
# sub_models.extend(sub_models_)
# return sub_models
# else:
# A, b, C, d = q.modelQ(rm)
# e = q.errorQ(include_err, X, Y, rm)
# dmap = pwa.DiscreteAffineMap(A, b, e)
# part = pwa.Partition(C, d, q)
# sub_model = pwa.SubModel(part, dmap)
# if __debug__:
# print('----------------Finalized------------------')
# return [sub_model]
# def get_qs_from_error_paths(sp, AA, error_paths, pi_seqs):
# if AA.num_dims.pi == 0:
# # traversed_abs_state_set
# tas = {state for path in error_paths for state in path}
# qs = [Qx(abs_state2cell(a, AA)) for a in tas]
# else:
# pi_eps = sp.pi_ref.eps
# # collect all pi which were encountered with the abs_state
# abs_state_pi = defaultdict(set)
# for path, pi_seq in zip(error_paths, pi_seqs):
# for abs_state, pi in zip(path[:-1], pi_seq):
# abs_state_pi[abs_state].add(pi)
# qs = []
# for abs_state, pi_ic_list in abs_state_pi.iteritems():
# xcell = abs_state2cell(abs_state, AA)
# for pi_ic in pi_ic_list:
# wcell = ic2cell(pi_ic, pi_eps)
# qs.append(Qxw(xcell, wcell, sp.pi_ref.i_cons))
# return qs
#
# def q_graph_models(ntrain, ntest, step_sim, tol, include_err, qgraph):
# qmodels = {}
# # Make a model for every q in the graph
# for q in qgraph:
# X, Y = q.get_rels(ntrain, step_sim)
# models = mdl(tol, qgraph, q, (X, Y), X, K)
# qmodels[q] = models
# return qmodels
# def mdl_old(tol, step_sim, qgraph, q, (X, Y), Y_, k):
# assert(X.shape[1] == q.dim)
# assert(Y_.shape[1] == q.dim)
# assert(X.shape[0] == Y.shape[0] == Y_.shape[0])
# if k == -1:
# err.warn('max depth exceeded but the error > tol. Giving up!')
# return []
# rm = RegressionModel(X, Y)
# e_pc = rm.max_error_pc(X, Y)
# err.imp('error%: {}'.format(e_pc))
# error_dims = np.arange(len(e_pc))[np.where(e_pc >= tol)]
# error_exceeds_tol = len(error_dims) > 0
# #err.warn('e%:{}, |e%|:{}'.format(e_pc, np.linalg.norm(e_pc, 2)))
# if error_exceeds_tol:
# ms = []
# for qi in qgraph.neighbors(q):
# Y__ = qi.sim(step_sim, Y_)
# sat = qi.sat(Y__)
# # TODO: If we are out of samples, we can't do much. Need to
# # handle this situation better? Not sure? Request for more
# # samples? Give up?
# if any(sat):
# rm_qseq = mdl(tol, step_sim, qgraph, qi, (X[sat], Y[sat]), Y__[sat], k-1)
# l = [(rm_, [q]+qseq_, e_pc_) for rm_, qseq_, e_pc_ in rm_qseq]
# ms.extend(l)
# else:
# err.warn('out of samples...Giving up!')
# # The loop never ran due to q not having any neighbors,
# # Or, no samples were left. We do the best with what we have
# # then.
# if not ms:
# ms = [(rm, [q], e_pc)]
# return ms
# else:
# return [(rm, [q], e_pc)]
# def lala(pwa_model, depth, init_partitions, prop_partitions, sys_name, model_type, AA, sys, prop, sp):
# # Extend both init set and final set to include inputs if any
# dummy_cons = top2ic(AA.num_dims.pi) # T <=> [-inf, inf]
# safety_prop = IntervalCons.concatenate(sp.final_cons, dummy_cons)
# init_cons = (sp.init_cons if AA.num_dims.pi == 0
# else IntervalCons.concatenate(
# sp.init_cons,
# sp.pi_ref.i_cons))
# xs = ['x'+str(i) for i in range(AA.num_dims.x)]
# ws = ['w'+str(i) for i in range(AA.num_dims.pi)]
# # Order is important
# vs = xs + ws
# bmc = BMC.factory(
# gopts.bmc_engine,
# sys,
# prop,
# vs,
# pwa_model, init_cons, safety_prop,
# init_partitions,
# prop_partitions,
# gopts.construct_path,
# '{}_{}'.format(sys_name, model_type),
# model_type)
# return bmc
# if TESTCODE:
# bmc = lala(pwa_sys_prop.pwa_model, depth,
# pwa_sys_prop.init_partitions,
# pwa_sys_prop.final_partitions,
# sys.sys_name, 'dft', AA, sys, prop, sp)
# list(bmc.print_all_CE(0))
# qgraph_ref_gen = bmc.print_all_CE(1)
# qgraphs = list(qgraph_ref_gen)
# for qgraph_ref in qgraphs:
# U.pause('qgraph refined, checking it')
# pwa_sys_prop, depth = get_pwa_system(sys, prop, sp, qgraph_ref)
# bmc =\
# lala(pwa_sys_prop.pwa_model, depth,
# pwa_sys_prop.init_partitions,
# pwa_sys_prop.final_partitions,
# sys.sys_name, 'dft', AA, sys, prop, sp)
# qqgraph_ref_gen = bmc.print_all_CE(2)
# qqgraphs = list(qqgraph_ref_gen)
# if not qqgraphs:
# U.pause('refinement fails: NO CE!')
# # bmc = lala(pwa_sys_prop.pwa_model, depth,
# # pwa_sys_prop.init_partitions,
# # pwa_sys_prop.final_partitions,
# # sys.sys_name, 'dft', AA, sys, prop, sp)
# # list(bmc.print_all_CE(0))
# exit()
#TESTCODE = False
def q_affine_models_old(AA, prop, ntrain, step_sim, tol, include_err, qgraph, q):
"""Find affine models for a given Q
Parameters
----------
cell : cell
step_sim : 1 time step (delta_t) simulator
tol : each abs state is split further into num_splits cells
        in order to meet: modeling error < tol (modulo ntests samples)
Returns
-------
pwa.SubModel()
Notes
------
"""
# Is non relational modeling being done? No, by default
pwa_non_relational = False
sub_models = []
try_again = True
ntries = 1
MAX_TRIES = 2
while try_again:
last_node = not qgraph.edges(q)
X, Y = q.get_rels(prop, step_sim, ntrain)
assert(not X.size == 0 or Y.size == 0)
assert(not Y.size == 0 or X.size == 0)
if X.size == 0:
# make sure it is the last node: had no edges
assert(last_node)
# The cell is completely inside the property
# If not, it means that the volume of Cell - prop is very
            # small and a sample wasn't found in there.
assert(prop.final_cons.contains(q.ival_constraints))
return [dummy_sub_model(q)]
try:
regression_models = mdl_old(AA, prop, tol, step_sim, qgraph, q, (X, Y), X, k=0, kmin=KMIN, kmax=KMAX)
# we are done!
if regression_models:
try_again = False
# else try again
else:
err.warn('no model found')
if ntries > MAX_TRIES:
if last_node:
err.warn('giving up')
try_again = False
else:
err.warn('can happen rarely...')
except AFM.UdetError:
pass
print('trying again')
# double the number of samples and try again
ntrain *= 2
# repeat!
ntries += 1
# try again on failure, and settle with non relational models
if not regression_models:
err.warn('No model found for q: {}'.format(q))
regression_models = mdl_old(AA, prop, np.inf, step_sim, qgraph, q, (X, Y), X, k=0, kmin=0, kmax=1)
assert(regression_models)
# No model found, get a non-relational model as the worst case
pwa_non_relational = True
# # TODO: fix this messy handling...?
# if not regression_models:
# # no model was found...node must be a sink node, otherwise
# # such a condition is not possible!
    # # It must be due to missing neighbors of the sink node.
# assert(qgraph.out_degree(q) == 0)
# # Now request for the model once more but given an infinite
# # tolerance so that we always get one. K=1 for sanity's sake,
# # as a depth > 1 should never be reached with tol = Inf.
# regression_models = mdl(np.inf, step_sim, qgraph, q, (X, Y), X, 1)
# # Due to the tolerance being Inf, we should get back a single
# # model
# assert(len(regression_models) == 1)
for rm, q_seq, e_pc, status in regression_models:
A, b = q.modelQ(rm)
e = q.errorQ(include_err, rm)
dmap = rel.DiscreteAffineMap((A, b), e)
C, d = q.ival_constraints.poly()
p = pwa.Partition(C, d, q)
future_partitions = []
pnexts = []
#if len(q_seq) == 1:
# if q_seq is empty, all its neighbours are reachable
if not q_seq:
# Relational modeling is currently forced as KMIN = 1
#assert(False)
assert(pwa_non_relational)
# No relational modeling was done. Use the relations from
# the graph. Add transitions to cell only seen in the
# subgraph.
# Force self loops just in case. The other option is to
# examin in the mdl() function if a self loop is possible
#err.warn('forcing self loops for every location!')
for qi in it.chain([q], qgraph.neighbors(q)):
C, d = qi.ival_constraints.poly()
#pnexts.append(pwa.Partition(C, d, qi))
pnexts = [pwa.Partition(C, d, qi)]
sub_model = rel.KPath(dmap, p, pnexts, future_partitions)
sub_model.max_error_pc = e_pc
sub_model.status = status
sub_models.append(sub_model)
# Relational modeling is available. Add the edge which was
# used to model this transition.
else:
# Add the immediate next reachable state
qnext = q_seq[0]
C, d = qnext.ival_constraints.poly()
pnexts.append(pwa.Partition(C, d, qnext))
# Add the states reachable in future
for qi in q_seq[1:]:
C, d = qi.ival_constraints.poly()
future_partitions.append(pwa.Partition(C, d, qi))
sub_model = rel.KPath(dmap, p, pnexts, future_partitions)
sub_model.max_error_pc = e_pc
sub_model.status = status
sub_models.append(sub_model)
return sub_models
| bsd-2-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/covariance/__init__.py | 10 | 1197 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope, EllipticEnvelop
__all__ = ['EllipticEnvelop',
'EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| agpl-3.0 |
pw31/GGchem | tools/Plot_phase.py | 1 | 6922 | import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, ScalarFormatter, LogLocator
from matplotlib.backends.backend_pdf import PdfPages
plt.rcParams['axes.linewidth'] = 1.5
single_figures = 0
if (single_figures==0): pp=PdfPages('ggchem.pdf')
file = 'Static_Conc_2D.dat'
#file = 'results/Static_Conc_2D_nocond.dat'
#file = 'results/Static_Conc_2D_eqcond.dat'
data = open(file)
dummy = data.readline()
dimens = data.readline()
dimens = np.array(dimens.split())
NELEM = int(dimens[0])
NMOLE = int(dimens[1])
NDUST = int(dimens[2])
NPOINT = int(dimens[3])
header = data.readline()
data.close()
keyword = np.array(header.split())
dat = np.loadtxt(file,skiprows=3)
bar = 1.E+6 # 1 bar in dyn/cm2
Tg = dat[:,0] # T [K]
nHtot = dat[:,1] # n<H> [cm-3]
lognH = np.log10(nHtot)
press = dat[:,2] # p [dyn/cm2]
logp = np.log10(press/bar)
Tmin = np.min(Tg)
Tmax = np.max(Tg)
pmin = np.min(logp)
pmax = np.max(logp)
#Tmin = 100
#Tmax = 460
iii = np.where((Tg>=Tmin) & (Tg<=Tmax) & (logp>=pmin) & (logp<=pmax))[0]
sep = 20
if (Tmax-Tmin>1500): sep=100
if (Tmax-Tmin>1000): sep=50
if (Tmax-Tmin<600): sep=20
if (Tmax-Tmin<400): sep=10
for sp in range(3,4+NELEM+NMOLE):
dat[:,sp] = dat[:,sp]-lognH[:] # log(nmol) -> log(nmol/n<H>) for molecules
dat = np.array(dat)
colo = ['gold','cadetblue','coral','blue','beige','chartreuse','darkgreen','red','darkorchid','aqua','burlywood','chocolate','black','darkkhaki','pink','moccasin','cornflowerblue','darkgray']
#'aquamarine','darkgoldenrod','darkorange','crimson','darkcyan','bisque','darkmagenta','darkolivegreen'
Ncolor = len(colo)
#================== where are the elements? ================
ellist = ['H','C','O','N','SI','S','NA','CL','CA','TI','K','AL','MG','FE','LI','F','P','NI','MN','CR','ZN','ZR','RB','CU','B','BR','V','SR','W','el']
allist = [' ',' ',' ',' ','Si',' ','Na','Cl','Ca','Ti',' ','Al','Mg','Fe','Li',' ',' ','Ni','Mn','Cr','Zn','Zr','Rb','Cu',' ','Br',' ','Sr',' ','+']
exlist = [' He HE ',' Cl CL Ca CA Cr CR Co Cu CU ',' ',' Na NA Ni NI ',' ',' Si SI Sr SR ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' Fe FE ',' ',' ',' ',' ',' ',' ',' ',' ',' Br BR ',' ',' ',' ',' ',' ']
titels = ['hydrogen','carbon','oxygen','nitrogen','silicon','sulphur','sodium','chlorine','calcium','titanium','potassium','aluminum','magnesium','iron','lithium','fluorine','phosphorus','nickel','manganese','chromium','zinc','zirconium','rubidium','copper','boron','bromine','vanadium','strontium','tungsten','charge carriers']
xl = np.zeros([NPOINT],dtype=np.float)
xr = np.zeros([NPOINT],dtype=np.float)
yl = np.zeros([NPOINT],dtype=np.float)
yr = np.zeros([NPOINT],dtype=np.float)
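# Left/right edges of each grid cell: temperatures are spaced logarithmically
# between Tmax and Tmin, pressures linearly in log10(p); these edges are used
# below to draw one colored rectangle per (T, p) grid point.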
for ix in range(0,NPOINT):
xl[ix] = np.exp(np.log(Tmax)+np.log(Tmin/Tmax)*np.max([0,ix-0.5])/(NPOINT-1))
xr[ix] = np.exp(np.log(Tmax)+np.log(Tmin/Tmax)*np.min([NPOINT-1,ix+0.5])/(NPOINT-1))
for iy in range(0,NPOINT):
yl[iy] = pmax+(pmin-pmax)*np.max([0,iy-0.5])/(NPOINT-1)
yr[iy] = pmax+(pmin-pmax)*np.min([NPOINT-1,iy+0.5])/(NPOINT-1)
for i in range(0,30):
el = ellist[i]
al = allist[i]
ex = exlist[i]
titel = titels[i]
print titel+" ..."
fig,ax = plt.subplots(figsize=(7,6))
nmax = np.float(-100)
splist = []
dat2 = np.zeros([len(dat[:,0]),len(dat[0,:])])
for sp in range(3,4+NELEM+NMOLE)+range(4+NELEM+NMOLE+NDUST,4+NELEM+NMOLE+2*NDUST):
spname = keyword[sp]
ind = str.find(spname,el)
if (ind < 0):
ind = str.find(spname,al)
if (ind < 0 and el=='el'):
ind = str.find(spname,'-')
if (ind >= 0):
next1 = spname[ind:ind+2]
next2 = spname[ind-1:ind+1]
#print keyword[sp],next1,str.find(ex,next1),len(next1)
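        # Keep only species that genuinely contain this element (filtering out
        # look-alike symbols listed in exlist), then read the stoichiometric
        # coefficient: the digits following the element symbol, multiplied by
        # any group factor found after a closing parenthesis.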
if (len(next1)==1 or str.find(ex,next1)==-1 or spname=='SIS'):
if (next2!='MN' and next2!='ZN'):
text1 = spname[ind+len(el):]
text2 = ''
for c in text1:
if c.isdigit():
text2=text2+c
else:
break
stoich = 1.0
if (len(text2)>0): stoich = float(text2)
j1=str.find(spname,'(')
j2=str.find(spname,')')
if (j1>=0 and j1<ind and ind<j2):
text2=spname[j2+1:j2+2]
stoich=stoich*float(text2)
print el,spname,stoich
lstoich = np.log10(stoich)
dat2[:,sp] = dat[:,sp] + lstoich
ymax = np.max(dat2[:,sp])
nmax = np.max([nmax,ymax])
if (ymax>-100): splist.append(sp)
if (nmax==-100): continue
splist = np.array(splist)
print keyword[splist]
implist = []
Nc = 0
ii = 0
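    # For every (T, p) grid cell pick the species with the highest
    # stoichiometry-weighted abundance and color the cell accordingly,
    # adding each newly dominant species to the legend once.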
for iy in range(0,NPOINT):
for ix in range(0,NPOINT):
cmax = np.max(dat2[ii,splist])
spmax = np.where(dat2[ii,splist]==cmax)[0][0]
#print keyword[splist[0:3]]
#print dat[ii,splist[0:3]]
#print dat2[ii,splist[0:3]]
if (spmax not in implist):
lab = keyword[splist[spmax]]
if ('[l]' in lab):
lab=lab[1:]
else:
if (lab[0]=='n'): lab=lab[1:]+'[s]'
implist.append(spmax)
cc = implist.index(spmax)
rect = patches.Rectangle((xl[ix],yl[iy]),xr[ix]-xl[ix],yr[iy]-yl[iy],
linewidth=0.02,edgecolor=colo[cc],facecolor=colo[cc],label=lab)
ax.add_patch(rect)
else:
cc = implist.index(spmax)
rect = patches.Rectangle((xl[ix],yl[iy]),xr[ix]-xl[ix],yr[iy]-yl[iy],
linewidth=0.02,edgecolor=colo[cc],facecolor=colo[cc])
ax.add_patch(rect)
#print ix,iy,Tg[ii],logp[ii],keyword[splist[spmax]],cc
ii = ii+1
implist = np.array(keyword[splist[implist]])
print implist
plt.title(titel,fontsize=20)
plt.xlabel(r'$T\ \mathrm{[K]}$',fontsize=22)
plt.ylabel(r'$\mathrm{log}_{10}\ p\ \mathrm{[bar]}$',fontsize=22)
plt.xlim(Tmin,Tmax)
plt.ylim(pmax,pmin)
if (Tmax/Tmin>10):
plt.xscale('log')
else:
minorLocator = MultipleLocator(sep)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
plt.tick_params(axis='both', labelsize=18)
plt.tick_params('both', length=11, width=2, which='major')
plt.tick_params('both', length=8, width=1.5, which='minor')
#minorLocator = MultipleLocator(1.0)
#if (nmax-nmin>50): minorLocator = MultipleLocator(2.0)
#if (nmax-nmin>100): minorLocator = MultipleLocator(5.0)
#if (nmax-nmin>200): minorLocator = MultipleLocator(10.0)
#ax.yaxis.set_minor_locator(minorLocator)
leg = plt.legend(loc='lower left',fontsize=13,fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.tight_layout()
if (single_figures==0): plt.savefig(pp,format='pdf')
if (single_figures==1): fig.savefig('phase_'+titel+'.png')
plt.clf()
if (single_figures==0): pp.close()
if (single_figures==0): print '... written output to ggchem.pdf.'
| gpl-3.0 |
pablodecm/cartographer | cartographer/tests/test_hyper_rectangle_coverer.py | 1 | 1966 | from cartographer.coverers import HyperRectangleCoverer
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.testing import assert_true, assert_raises
import numpy as np
def test_one_dimension_int_parameters():
X, true_labels = make_blobs(n_samples=1000, n_features=1)
n_intervals = 10
hrc = HyperRectangleCoverer(n_intervals, 0.3)
hrc.fit(X)
assert_true(hrc.lowerbounds.shape[0] == n_intervals**X.shape[1])
assert_true(hrc.upperbounds.shape[0] == n_intervals**X.shape[1])
m_matrix = hrc.transform(X)
assert_true(m_matrix.shape[1] == n_intervals**X.shape[1])
# every sample is in at least one partition
assert_true(np.sum(m_matrix.any(axis=1)) == X.shape[0])
def test_two_dimension_int_parameters():
X, true_labels = make_blobs(n_samples=1000, n_features=2)
hrc = HyperRectangleCoverer(10, 0.3)
n_intervals = 10
hrc.fit(X)
assert_true(hrc.lowerbounds.shape[0] == n_intervals**X.shape[1])
assert_true(hrc.upperbounds.shape[0] == n_intervals**X.shape[1])
m_matrix = hrc.transform(X)
assert_true(m_matrix.shape[1] == n_intervals**X.shape[1])
def test_three_dimension_int_wrong_cases():
X, true_labels = make_blobs(n_samples=1000, n_features=3)
intervals = [5, 10, 15]
overlap = [0.1, 0.2, 0.3]
hrc = HyperRectangleCoverer(intervals, overlap) # right case
hrc_wrong_intervals = HyperRectangleCoverer([10, 5], 0.3)
hrc_wrong_overlap = HyperRectangleCoverer(10, [0.1, 0.2])
assert_raises(ValueError, hrc_wrong_intervals.fit, X)
assert_raises(ValueError, hrc_wrong_overlap.fit, X)
hrc.fit(X)
assert_true(hrc.lowerbounds.shape[0] == np.prod(intervals))
assert_true(hrc.upperbounds.shape[0] == np.prod(intervals))
m_matrix = hrc.transform(X)
assert_true(m_matrix.shape[1] == np.prod(intervals))
assert_raises(ValueError, hrc_wrong_overlap.fit, X, X)
assert_raises(ValueError, hrc_wrong_overlap.transform, X, X)
| mit |
blbarker/spark-tk | regression-tests/sparktkregtests/testcases/models/svm_2d_slope1_test.py | 10 | 3131 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Validate svm"""
import unittest
from sparktkregtests.lib import sparktk_test
class Svm2DSlope1(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build the frame needed for these tests"""
super(Svm2DSlope1, self).setUp()
sch2 = [("Class", int), # Class is either 1 or 0.
("Dim_1", float),
("Dim_2", float)]
train_file = self.get_file("SVM-2F-train-50X50_1SlopePlus0.csv")
test_file = self.get_file("SVM-2F-test-50X50_1SlopePlus0.csv")
self.trainer = self.context.frame.import_csv(train_file,
schema=sch2)
self.frame = self.context.frame.import_csv(test_file,
schema=sch2)
def test_svm_model_test(self):
"""Test with train and test data generated with same hyperplane"""
model = self.context.models.classification.svm.train(self.trainer,
["Dim_1", "Dim_2"],
"Class")
results = model.test(self.frame)
# assert that model reports acceptable accuracy, etc.
self.assertEqual(1.0, results.recall)
self.assertEqual(1.0, results.accuracy)
self.assertEqual(1.0, results.precision)
self.assertEqual(1.0, results.f_measure)
# Now we verify the confusion matrix contains the expected results.
cf = results.confusion_matrix
self.assertEqual(cf['Predicted_Pos']['Actual_Pos'], 95)
self.assertEqual(cf['Predicted_Neg']['Actual_Pos'], 0)
self.assertEqual(cf['Predicted_Pos']['Actual_Neg'], 0)
self.assertEqual(cf['Predicted_Neg']['Actual_Neg'], 105)
def test_svm_model_predict(self):
"""Test the predict function"""
model = self.context.models.classification.svm.train(self.trainer,
["Dim_1", "Dim_2"],
"Class")
predicted_frame = model.predict(self.frame)
outcome = predicted_frame.to_pandas()
# Verify that values in 'predict' and 'Class' columns match.
for index, row in outcome.iterrows():
self.assertEqual(row["Class"], row["predicted_label"])
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
chiffa/Karyotype_retriever | src/Drawing_functions.py | 1 | 5920 | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import gridspec
import supporting_functions as sf
from chiffatools.dataviz import smooth_histogram
from scipy.stats import beta
def multilane_plot(main_pad, multi_pad_list):
def morph_shape(arr, size):
return np.repeat(arr[np.newaxis, :], size, axis=0)
step_size = 200/(1+len(multi_pad_list))
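    # The main pad is drawn as a full-height band; each additional array is
    # overlaid as a progressively narrower band so every lane stays visible.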
plt.imshow(morph_shape(main_pad, 200), interpolation='nearest', cmap='spectral')
for i, array in enumerate(multi_pad_list):
j = len(multi_pad_list) - i
plt.imshow(morph_shape(array, j*step_size),
interpolation='nearest', cmap='coolwarm',
vmin=-1, vmax=1)
def remainder_plot(remainders, FDR=0.005):
plt.plot(remainders, 'k.')
plt.plot(sf.get_outliers(remainders, FDR), 'r.')
def plot_classification(parsed, chr_tag, current_lane, segment_averages, binarized, FDR):
ax1 = plt.subplot(311)
multilane_plot(chr_tag, [parsed, binarized])
plt.setp(ax1.get_xticklabels(), fontsize=6)
ax2 = plt.subplot(312, sharex=ax1)
remainder_plot(current_lane, FDR)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.subplot(313, sharex=ax1, sharey=ax2)
remainder_plot(current_lane - segment_averages, FDR)
plt.show()
def multi_level_plot(chr_tag, starting_dataset, regression, final_remainder,
list_of_regressions, HMM_decisions, remainder_list,
HMM_states, chromosome_state, arms_state,):
ax1 = plt.subplot(511)
remainder_plot(final_remainder, FDR=0.01)
plt.setp(ax1.get_xticklabels(), fontsize=6)
ax2 = plt.subplot(512, sharex=ax1)
plt.plot(starting_dataset, 'k.')
plt.plot(regression)
plt.setp(ax2.get_xticklabels(), fontsize=6)
ax3 = plt.subplot(513, sharex=ax1)
multilane_plot(chr_tag, list_of_regressions)
plt.setp(ax3.get_xticklabels(), visible=False)
plt.ylim(0, 200)
ax4 = plt.subplot(514, sharex=ax1)
multilane_plot(chr_tag, HMM_decisions)
plt.setp(ax4.get_xticklabels(), visible=False)
plt.ylim(0, 200)
ax5 = plt.subplot(515, sharex=ax1)
multilane_plot(chr_tag, remainder_list)
plt.setp(ax5.get_xticklabels(), visible=False)
plt.ylim(0, 200)
plt.show()
ax1 = plt.subplot(511)
plt.plot(starting_dataset, 'k.')
plt.plot(regression)
plt.setp(ax1.get_xticklabels(), fontsize=6)
ax2 = plt.subplot(512, sharex=ax1)
multilane_plot(chr_tag, [HMM_states])
plt.setp(ax2.get_xticklabels(), visible=False)
plt.ylim(0, 200)
ax3 = plt.subplot(513, sharex=ax1)
multilane_plot(chr_tag, [chromosome_state, arms_state])
plt.setp(ax3.get_xticklabels(), visible=False)
plt.ylim(0, 200)
ax4 = plt.subplot(514, sharex=ax1)
c_remainder = sf.get_outliers(final_remainder, 0.005)
c_remainder[np.isnan(c_remainder)] = 0
multilane_plot(chr_tag, [regression, c_remainder])
plt.setp(ax4.get_xticklabels(), visible=False)
plt.ylim(0, 200)
plt.show()
def show_breakpoints(breakpoints, color='k'):
    """
    plots vertical lines at the breakpoint positions
    :param breakpoints: iterable of x positions at which to draw vertical lines
    :param color: matplotlib color used for the lines
    :return: None
    """
for point in breakpoints:
plt.axvline(x=point, color=color)
def inflate_support(length, breakpoints, values=None):
"""
transforms 1D representation of chromosomes into a 2d array that can be rendered with an eventual filter on breakpoints
:param length:
:param breakpoints:
:param values:
:return:
"""
if values is None:
values = np.array(range(0, len(breakpoints)))
    if breakpoints[-1] < length:
breakpoints.append(length)
ret_array = np.zeros((100, length))
for _i in range(1, values.shape[0]):
ret_array[:, breakpoints[_i-1]: breakpoints[_i]] = values[_i]
return ret_array
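# Usage sketch for inflate_support (added comment with hypothetical values):
#
#     >>> inflate_support(10, [0, 4, 7], values=np.array([0.0, 1.0, -1.0]))
#
# yields a (100, 10) array whose columns 0-3 hold values[1] (1.0) and whose
# columns 4-6 hold values[2] (-1.0); columns 7-9 stay at 0 because only
# len(values) - 1 segments are painted.  The result is ready for plt.imshow.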
def inflate_tags(_1D_array, width=100):
"""
reshapes a 1_d array into a 2d array that can be rendered
:param _1D_array:
:param width:
:return:
"""
nar = _1D_array.reshape((1, _1D_array.shape[0]))
return np.repeat(nar, width, axis=0)
def plot(_list):
plt.imshow(_list, interpolation='nearest', cmap='coolwarm')
plt.show()
def plot2(_list, chr_brps, centromere_brps, line_names=None):
if not line_names:
line_names = range(1, _list.shape[0]+1)
inflated_table = np.vstack([inflate_tags(x[0, :], 25) for x in np.split(_list, _list.shape[0])])
gs = gridspec.GridSpec(4, 4)
ax1 = plt.subplot(gs[:-1, :])
plt.imshow(inflated_table, interpolation='nearest', cmap='coolwarm')
show_breakpoints([0] + chr_brps + [_list.shape[1]], 'k')
show_breakpoints(list(set(centromere_brps) - set(chr_brps)), 'g')
ax2 = plt.subplot(gs[-1, :], sharex=ax1)
red_run = np.nanmean((_list > 0).astype(np.float), 0)
blue_run = np.nanmean((_list < 0).astype(np.float), 0)
stack = np.hstack((blue_run, red_run))
mean = np.mean(stack)
std = np.std(stack)
_alpha = ((1 - mean)/std**2 - 1/mean)*mean**2
_beta = _alpha*(1/mean-1)
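    # Explanatory note (added comment): the two lines above are a
    # method-of-moments fit of a Beta distribution.  For Beta(a, b) the mean
    # is mu = a / (a + b) and the variance is
    # var = a * b / ((a + b)**2 * (a + b + 1)); solving for a and b gives
    #     a = ((1 - mu) / var - 1 / mu) * mu**2
    #     b = a * (1 / mu - 1)
    # with mu and var estimated from the pooled red/blue run fractions.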
r = beta.rvs(_alpha, _beta, size=1000)
_min, _max = beta.interval(0.95, _alpha, _beta)
plt.plot(blue_run, 'b')
plt.plot(red_run, 'r')
plt.axhline(y=_min, color='g')
plt.axhline(y=_max, color='g')
show_breakpoints([0] + chr_brps + [_list.shape[1]], 'k')
show_breakpoints(list(set(centromere_brps) - set(chr_brps)), 'g')
chr_arm_locations, chr_arm_names = sf.align_chromosome_edges(chr_brps, centromere_brps)
ax1.set_xticks(chr_arm_locations)
ax1.set_xticklabels(chr_arm_names, rotation='vertical')
ax1.set_yticks(range(0, _list.shape[0]*25+1, 25))
ax1.set_yticklabels(line_names)
ax2.set_xticks(chr_arm_locations)
ax2.set_xticklabels(chr_arm_names, rotation='vertical')
plt.show()
smooth_histogram(r, 'b')
smooth_histogram(stack)
plt.axvline(x=_max, color='g')
plt.axvline(x=_min, color='g')
plt.show()
| bsd-3-clause |
yanlend/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distict cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1 to 1, but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
dinos66/termAnalysis | forTateDataset/checkCaptionSummaryUrls.py | 1 | 5434 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: checkCaptionSummaryUrls
# Purpose: This .py file checks which artwork summary and caption urls exist.
#
# Required libs: python-dateutil,pyparsing,numpy,matplotlib,networkx
# Author: konkonst
#
# Created: 20/08/2013
# Copyright: (c) ITI (CERTH) 2013
# Licence: <apache licence 2.0>
#-------------------------------------------------------------------------------
import json,codecs,os,glob,time, pickle, collections, requests
import concurrent.futures
from os import walk
urlsInParallel = 10
targetpath = './data/artworks'
session = requests.Session()
def load_url(url, timeout):
try:
resp = session.head(url, allow_redirects=True, timeout = timeout)
trueUrl = resp.url
except:
trueUrl = 'else'
pass
return trueUrl
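# Illustrative sketch of the batching pattern used below (added comment;
# ``batch`` stands in for one slice of ``urlsInParallel`` URLs):
#
#     >>> import concurrent.futures
#     >>> with concurrent.futures.ThreadPoolExecutor(max_workers=10) as ex:
#     ...     future_to_url = {ex.submit(load_url, u, 10): u for u in batch}
#     ...     for fut in concurrent.futures.as_completed(future_to_url, timeout=60):
#     ...         resolved = fut.result()
#
# Each batch is resolved concurrently, and only URLs that resolve to
# themselves (no redirect away) are recorded as existing pages.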
print('check summary and caption urls')
try:
potentialsummaryUrls = pickle.load(open('./data/artworks_tmp/potentialsummaryUrls.pck','rb'))
potentialcaptionUrls = pickle.load(open('./data/artworks_tmp/potentialcaptionUrls.pck','rb'))
print('using processed potential summaryUrls')
except:
potentialsummaryUrls, potentialcaptionUrls = [], []
for dirname, dirnames, filenames in walk(targetpath):
for filename in filenames:
fileId = filename[:-5]
filepath = '/'.join([dirname,filename])
fileopen = open(filepath).read()
jsonfile = json.loads(fileopen)
mainUrl = jsonfile['url']
potentialsummaryUrls.append(mainUrl+'/text-summary')
potentialcaptionUrls.append(mainUrl+'/text-display-caption')
print('potentialsummaryUrls are %s and potentialcaptionUrls are %s' %(len(potentialsummaryUrls), len(potentialcaptionUrls)))
try:
existingsummaryUrls = pickle.load(open('./data/artworks_tmp/existingsummaryUrls.pck','rb'))
print('using processed existingsummaryUrls')
except:
existingsummaryUrls = set()
pass
try:
existingcaptionUrls = pickle.load(open('./data/artworks_tmp/existingcaptionUrls.pck','rb'))
print('using processed existingcaptionUrls')
except:
existingcaptionUrls = set()
pass
print('existingsummaryUrls are %s and existingcaptionUrls are %s' %(len(existingsummaryUrls), len(existingcaptionUrls)))
t = time.time()
for i in range(3):
if i:
print('Repassing to ensure full unshortening')
tmpsummaryUrls = [x for x in potentialsummaryUrls if x not in existingsummaryUrls]
summaryShorts = [tmpsummaryUrls[x:x+urlsInParallel] for x in range(0, len(tmpsummaryUrls), urlsInParallel)]
tmpcaptionUrls = [x for x in potentialcaptionUrls if x not in existingcaptionUrls]
captionShorts = [tmpcaptionUrls[x:x+urlsInParallel] for x in range(0, len(tmpcaptionUrls), urlsInParallel)]
summaryurlLength = len(summaryShorts)
print('There are '+str(summaryurlLength)+' batches of '+str(urlsInParallel)+' summary urls')
tssumm = int(summaryurlLength/urlsInParallel)
captionurlLength = len(captionShorts)
print('There are '+str(captionurlLength)+' batches of '+str(urlsInParallel)+' caption urls')
tscapt = int(captionurlLength/urlsInParallel)
with concurrent.futures.ThreadPoolExecutor(max_workers=1000) as executor:
for idx,tmpshorts in enumerate(summaryShorts):
# Start the load operations and mark each future with its URL
future_to_url = {executor.submit(load_url, url, 10): url for url in tmpshorts}
try:
for future in concurrent.futures.as_completed(future_to_url, timeout=60):
thisUrl = future_to_url[future]
trueUrl = future.result()
if trueUrl and thisUrl==trueUrl:
existingsummaryUrls.add(trueUrl)
except concurrent.futures._base.TimeoutError:
pass
if not idx%200:
pickle.dump(existingsummaryUrls, open('./data/artworks_tmp/existingsummaryUrls.pck','wb'))
print('@@@@@ Just passed batch '+str(idx)+' at '+time.strftime("%H:%M||%d/%m "))
with concurrent.futures.ThreadPoolExecutor(max_workers=1000) as executor:
for idx,tmpshorts in enumerate(captionShorts):
# Start the load operations and mark each future with its URL
future_to_url = {executor.submit(load_url, url, 10): url for url in tmpshorts}
try:
for future in concurrent.futures.as_completed(future_to_url, timeout=60):
thisUrl = future_to_url[future]
trueUrl = future.result()
if trueUrl and thisUrl==trueUrl:
existingcaptionUrls.add(trueUrl)
except concurrent.futures._base.TimeoutError:
pass
if not idx%200:
pickle.dump(existingcaptionUrls, open('./data/artworks_tmp/existingcaptionUrls.pck','wb'))
print('@@@@@ Just passed batch '+str(idx)+' at '+time.strftime("%H:%M||%d/%m "))
pickle.dump(existingsummaryUrls, open('./data/artworks_tmp/existingsummaryUrls.pck','wb'))
pickle.dump(existingcaptionUrls, open('./data/artworks_tmp/existingcaptionUrls.pck','wb'))
elapsed = time.time() - t
print('Elapsed: %.2f seconds' % elapsed)
t = time.time()
| apache-2.0 |
jmmease/pandas | pandas/compat/numpy/function.py | 10 | 13077 | """
For compatibility with numpy libraries, pandas functions or
methods have to accept '*args' and '**kwargs' parameters to
accommodate numpy arguments that are not actually used or
respected in the pandas implementation.
To ensure that users do not abuse these parameters, validation
is performed in 'validators.py' to make sure that any extra
parameters passed correspond ONLY to those in the numpy signature.
Part of that validation includes whether or not the user attempted
to pass in non-default values for these extraneous parameters. As we
want to discourage users from relying on these parameters when calling
the pandas implementation, we want them only to pass in the default values
for these parameters.
This module provides a set of commonly used default arguments for functions
and methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from numpy import ndarray
from pandas.util._validators import (validate_args, validate_kwargs,
validate_args_and_kwargs)
from pandas.errors import UnsupportedFunctionCall
from pandas.core.dtypes.common import is_integer, is_bool
from pandas.compat import OrderedDict
class CompatValidator(object):
def __init__(self, defaults, fname=None, method=None,
max_fname_arg_count=None):
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
def __call__(self, args, kwargs, fname=None,
max_fname_arg_count=None, method=None):
if args or kwargs:
fname = self.fname if fname is None else fname
max_fname_arg_count = (self.max_fname_arg_count if
max_fname_arg_count is None
else max_fname_arg_count)
method = self.method if method is None else method
if method == 'args':
validate_args(fname, args, max_fname_arg_count, self.defaults)
elif method == 'kwargs':
validate_kwargs(fname, kwargs, self.defaults)
elif method == 'both':
validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
self.defaults)
else:
raise ValueError("invalid validation method "
"'{method}'".format(method=method))
ARGMINMAX_DEFAULTS = dict(out=None)
validate_argmin = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmin',
method='both', max_fname_arg_count=1)
validate_argmax = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmax',
method='both', max_fname_arg_count=1)
def process_skipna(skipna, args):
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
return skipna, args
def validate_argmin_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
def validate_argmax_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
ARGSORT_DEFAULTS = OrderedDict()
ARGSORT_DEFAULTS['axis'] = -1
ARGSORT_DEFAULTS['kind'] = 'quicksort'
ARGSORT_DEFAULTS['order'] = None
validate_argsort = CompatValidator(ARGSORT_DEFAULTS, fname='argsort',
max_fname_arg_count=0, method='both')
# two different signatures of argsort, this second validation
# for when the `kind` param is supported
ARGSORT_DEFAULTS_KIND = OrderedDict()
ARGSORT_DEFAULTS_KIND['axis'] = -1
ARGSORT_DEFAULTS_KIND['order'] = None
validate_argsort_kind = CompatValidator(ARGSORT_DEFAULTS_KIND, fname='argsort',
max_fname_arg_count=0, method='both')
def validate_argsort_with_ascending(ascending, args, kwargs):
"""
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
return ascending
CLIP_DEFAULTS = dict(out=None)
validate_clip = CompatValidator(CLIP_DEFAULTS, fname='clip',
method='both', max_fname_arg_count=3)
def validate_clip_with_axis(axis, args, kwargs):
"""
If 'NDFrame.clip' is called via the numpy library, the third
parameter in its signature is 'out', which can takes an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
axis = None
validate_clip(args, kwargs)
return axis
COMPRESS_DEFAULTS = OrderedDict()
COMPRESS_DEFAULTS['axis'] = None
COMPRESS_DEFAULTS['out'] = None
validate_compress = CompatValidator(COMPRESS_DEFAULTS, fname='compress',
method='both', max_fname_arg_count=1)
CUM_FUNC_DEFAULTS = OrderedDict()
CUM_FUNC_DEFAULTS['dtype'] = None
CUM_FUNC_DEFAULTS['out'] = None
validate_cum_func = CompatValidator(CUM_FUNC_DEFAULTS, method='both',
max_fname_arg_count=1)
validate_cumsum = CompatValidator(CUM_FUNC_DEFAULTS, fname='cumsum',
method='both', max_fname_arg_count=1)
def validate_cum_func_with_skipna(skipna, args, kwargs, name):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
validate_cum_func(args, kwargs, fname=name)
return skipna
ALLANY_DEFAULTS = OrderedDict()
ALLANY_DEFAULTS['dtype'] = None
ALLANY_DEFAULTS['out'] = None
validate_all = CompatValidator(ALLANY_DEFAULTS, fname='all',
method='both', max_fname_arg_count=1)
validate_any = CompatValidator(ALLANY_DEFAULTS, fname='any',
method='both', max_fname_arg_count=1)
LOGICAL_FUNC_DEFAULTS = dict(out=None)
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method='kwargs')
MINMAX_DEFAULTS = dict(out=None)
validate_min = CompatValidator(MINMAX_DEFAULTS, fname='min',
method='both', max_fname_arg_count=1)
validate_max = CompatValidator(MINMAX_DEFAULTS, fname='max',
method='both', max_fname_arg_count=1)
RESHAPE_DEFAULTS = dict(order='C')
validate_reshape = CompatValidator(RESHAPE_DEFAULTS, fname='reshape',
method='both', max_fname_arg_count=1)
REPEAT_DEFAULTS = dict(axis=None)
validate_repeat = CompatValidator(REPEAT_DEFAULTS, fname='repeat',
method='both', max_fname_arg_count=1)
ROUND_DEFAULTS = dict(out=None)
validate_round = CompatValidator(ROUND_DEFAULTS, fname='round',
method='both', max_fname_arg_count=1)
SORT_DEFAULTS = OrderedDict()
SORT_DEFAULTS['axis'] = -1
SORT_DEFAULTS['kind'] = 'quicksort'
SORT_DEFAULTS['order'] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname='sort',
method='kwargs')
STAT_FUNC_DEFAULTS = OrderedDict()
STAT_FUNC_DEFAULTS['dtype'] = None
STAT_FUNC_DEFAULTS['out'] = None
validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS,
method='kwargs')
validate_sum = CompatValidator(STAT_FUNC_DEFAULTS, fname='sum',
method='both', max_fname_arg_count=1)
validate_mean = CompatValidator(STAT_FUNC_DEFAULTS, fname='mean',
method='both', max_fname_arg_count=1)
STAT_DDOF_FUNC_DEFAULTS = OrderedDict()
STAT_DDOF_FUNC_DEFAULTS['dtype'] = None
STAT_DDOF_FUNC_DEFAULTS['out'] = None
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS,
method='kwargs')
TAKE_DEFAULTS = OrderedDict()
TAKE_DEFAULTS['out'] = None
TAKE_DEFAULTS['mode'] = 'raise'
validate_take = CompatValidator(TAKE_DEFAULTS, fname='take',
method='kwargs')
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method='both')
return convert
TRANSPOSE_DEFAULTS = dict(axes=None)
validate_transpose = CompatValidator(TRANSPOSE_DEFAULTS, fname='transpose',
method='both', max_fname_arg_count=0)
def validate_transpose_for_generic(inst, kwargs):
try:
validate_transpose(tuple(), kwargs)
except ValueError as e:
klass = type(inst).__name__
msg = str(e)
        # the Panel class actually relies on the 'axes' parameter if called
# via the 'numpy' library, so let's make sure the error is specific
# about saying that the parameter is not supported for particular
# implementations of 'transpose'
if "the 'axes' parameter is not supported" in msg:
msg += " for {klass} instances".format(klass=klass)
raise ValueError(msg)
def validate_window_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .{func}() directly instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_rolling_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .rolling(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
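# Example of the call pattern the checks above reject (added comment,
# assuming a DataFrame ``df``): a numpy reduction applied to a window object,
# e.g. ``np.sum(df.rolling(2))``, forwards numpy's 'axis'/'dtype'/'out'
# keywords into the window method and triggers UnsupportedFunctionCall;
# the supported spelling is ``df.rolling(2).sum()``.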
def validate_expanding_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .expanding(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with groupby. Use .groupby(...)."
"{func}() instead".format(func=name)))
RESAMPLER_NUMPY_OPS = ('min', 'max', 'sum', 'prod',
'mean', 'std', 'var')
def validate_resampler_func(method, args, kwargs):
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
"""
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with resample. Use .resample(...)."
"{func}() instead".format(func=method)))
else:
raise TypeError("too many arguments passed in")
| bsd-3-clause |
Eric89GXL/scipy | scipy/spatial/tests/test__plotutils.py | 11 | 2156 | from __future__ import division, print_function, absolute_import
import pytest
from numpy.testing import assert_, assert_array_equal
from scipy._lib._numpy_compat import suppress_warnings
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import MatplotlibDeprecationWarning
has_matplotlib = True
except Exception:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
@pytest.mark.skipif(not has_matplotlib, reason="Matplotlib not available")
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
voronoi_plot_2d(obj, show_vertices=False)
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| bsd-3-clause |
wangcy6/storm_app | frame/c++/webrtc-master/rtc_tools/py_event_log_analyzer/rtp_analyzer.py | 6 | 12708 | # Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Displays statistics and plots graphs from RTC protobuf dump."""
from __future__ import division
from __future__ import print_function
import collections
import optparse
import os
import sys
import matplotlib.pyplot as plt
import numpy
import misc
import pb_parse
class RTPStatistics(object):
"""Has methods for calculating and plotting RTP stream statistics."""
BANDWIDTH_SMOOTHING_WINDOW_SIZE = 10
PLOT_RESOLUTION_MS = 50
def __init__(self, data_points):
"""Initializes object with data_points and computes simple statistics.
Computes percentages of number of packets and packet sizes by
SSRC.
Args:
data_points: list of pb_parse.DataPoints on which statistics are
calculated.
"""
self.data_points = data_points
self.ssrc_frequencies = misc.NormalizeCounter(
collections.Counter([pt.ssrc for pt in self.data_points]))
self.ssrc_size_table = misc.SsrcNormalizedSizeTable(self.data_points)
self.bandwidth_kbps = None
self.smooth_bw_kbps = None
def PrintHeaderStatistics(self):
print("{:>6}{:>14}{:>14}{:>6}{:>6}{:>3}{:>11}".format(
"SeqNo", "TimeStamp", "SendTime", "Size", "PT", "M", "SSRC"))
for point in self.data_points:
print("{:>6}{:>14}{:>14}{:>6}{:>6}{:>3}{:>11}".format(
point.sequence_number, point.timestamp,
int(point.arrival_timestamp_ms), point.size, point.payload_type,
point.marker_bit, "0x{:x}".format(point.ssrc)))
def PrintSsrcInfo(self, ssrc_id, ssrc):
"""Prints packet and size statistics for a given SSRC.
Args:
ssrc_id: textual identifier of SSRC printed beside statistics for it.
ssrc: SSRC by which to filter data and display statistics
"""
filtered_ssrc = [point for point in self.data_points if point.ssrc
== ssrc]
payloads = misc.NormalizeCounter(
collections.Counter([point.payload_type for point in
filtered_ssrc]))
payload_info = "payload type(s): {}".format(
", ".join(str(payload) for payload in payloads))
print("{} 0x{:x} {}, {:.2f}% packets, {:.2f}% data".format(
ssrc_id, ssrc, payload_info, self.ssrc_frequencies[ssrc] * 100,
self.ssrc_size_table[ssrc] * 100))
print(" packet sizes:")
(bin_counts, bin_bounds) = numpy.histogram([point.size for point in
filtered_ssrc], bins=5,
density=False)
bin_proportions = bin_counts / sum(bin_counts)
print("\n".join([
" {:.1f} - {:.1f}: {:.2f}%".format(bin_bounds[i], bin_bounds[i + 1],
bin_proportions[i] * 100)
for i in range(len(bin_proportions))
]))
def ChooseSsrc(self):
"""Queries user for SSRC."""
if len(self.ssrc_frequencies) == 1:
chosen_ssrc = self.ssrc_frequencies[0][-1]
self.PrintSsrcInfo("", chosen_ssrc)
return chosen_ssrc
ssrc_is_incoming = misc.SsrcDirections(self.data_points)
incoming = [ssrc for ssrc in ssrc_is_incoming if ssrc_is_incoming[ssrc]]
outgoing = [ssrc for ssrc in ssrc_is_incoming if not ssrc_is_incoming[ssrc]]
print("\nIncoming:\n")
for (i, ssrc) in enumerate(incoming):
self.PrintSsrcInfo(i, ssrc)
print("\nOutgoing:\n")
for (i, ssrc) in enumerate(outgoing):
self.PrintSsrcInfo(i + len(incoming), ssrc)
while True:
chosen_index = int(misc.get_input("choose one> "))
if 0 <= chosen_index < len(self.ssrc_frequencies):
return (incoming + outgoing)[chosen_index]
else:
print("Invalid index!")
def FilterSsrc(self, chosen_ssrc):
"""Filters and wraps data points.
Removes data points with `ssrc != chosen_ssrc`. Unwraps sequence
numbers and timestamps for the chosen selection.
"""
self.data_points = [point for point in self.data_points if
point.ssrc == chosen_ssrc]
unwrapped_sequence_numbers = misc.Unwrap(
[point.sequence_number for point in self.data_points], 2**16 - 1)
for (data_point, sequence_number) in zip(self.data_points,
unwrapped_sequence_numbers):
data_point.sequence_number = sequence_number
unwrapped_timestamps = misc.Unwrap([point.timestamp for point in
self.data_points], 2**32 - 1)
for (data_point, timestamp) in zip(self.data_points,
unwrapped_timestamps):
data_point.timestamp = timestamp
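  # Worked example of the unwrapping above (added comment, assuming
  # misc.Unwrap removes 16-bit wrap-arounds): a captured sequence-number
  # series such as [65534, 65535, 0, 1, 2] becomes the monotone series
  # [65534, 65535, 65536, 65537, 65538], so gaps, duplicates and reordering
  # can later be counted with plain arithmetic.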
def PrintSequenceNumberStatistics(self):
seq_no_set = set(point.sequence_number for point in
self.data_points)
missing_sequence_numbers = max(seq_no_set) - min(seq_no_set) + (
1 - len(seq_no_set))
print("Missing sequence numbers: {} out of {} ({:.2f}%)".format(
missing_sequence_numbers,
len(seq_no_set),
100 * missing_sequence_numbers / len(seq_no_set)
))
print("Duplicated packets: {}".format(len(self.data_points) -
len(seq_no_set)))
print("Reordered packets: {}".format(
misc.CountReordered([point.sequence_number for point in
self.data_points])))
def EstimateFrequency(self, always_query_sample_rate):
"""Estimates frequency and updates data.
Guesses the most probable frequency by looking at changes in
timestamps (RFC 3550 section 5.1), calculates clock drifts and
sending time of packets. Updates `self.data_points` with changes
in delay and send time.
"""
delta_timestamp = (self.data_points[-1].timestamp -
self.data_points[0].timestamp)
delta_arr_timestamp = float((self.data_points[-1].arrival_timestamp_ms -
self.data_points[0].arrival_timestamp_ms))
freq_est = delta_timestamp / delta_arr_timestamp
freq_vec = [8, 16, 32, 48, 90]
freq = None
for f in freq_vec:
if abs((freq_est - f) / f) < 0.05:
freq = f
print("Estimated frequency: {:.3f}kHz".format(freq_est))
if freq is None or always_query_sample_rate:
if not always_query_sample_rate:
print ("Frequency could not be guessed.", end=" ")
freq = int(misc.get_input("Input frequency (in kHz)> "))
else:
print("Guessed frequency: {}kHz".format(freq))
for point in self.data_points:
point.real_send_time_ms = (point.timestamp -
self.data_points[0].timestamp) / freq
point.delay = point.arrival_timestamp_ms - point.real_send_time_ms
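  # Worked example (added comment): for a 90 kHz video stream the RTP
  # timestamp advances by about 90 000 units per second of wall-clock time,
  # i.e. 90 units per millisecond, so
  #     freq_est = delta_timestamp / delta_arr_timestamp ~= 90
  # and the 5% tolerance above snaps the estimate to the nearest entry of
  # freq_vec, here 90.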
def PrintDurationStatistics(self):
"""Prints delay, clock drift and bitrate statistics."""
min_delay = min(point.delay for point in self.data_points)
for point in self.data_points:
point.absdelay = point.delay - min_delay
stream_duration_sender = self.data_points[-1].real_send_time_ms / 1000
print("Stream duration at sender: {:.1f} seconds".format(
stream_duration_sender
))
arrival_timestamps_ms = [point.arrival_timestamp_ms for point in
self.data_points]
stream_duration_receiver = (max(arrival_timestamps_ms) -
min(arrival_timestamps_ms)) / 1000
print("Stream duration at receiver: {:.1f} seconds".format(
stream_duration_receiver
))
print("Clock drift: {:.2f}%".format(
100 * (stream_duration_receiver / stream_duration_sender - 1)
))
total_size = sum(point.size for point in self.data_points) * 8 / 1000
print("Send average bitrate: {:.2f} kbps".format(
total_size / stream_duration_sender))
print("Receive average bitrate: {:.2f} kbps".format(
total_size / stream_duration_receiver))
def RemoveReordered(self):
last = self.data_points[0]
data_points_ordered = [last]
for point in self.data_points[1:]:
if point.sequence_number > last.sequence_number and (
point.real_send_time_ms > last.real_send_time_ms):
data_points_ordered.append(point)
last = point
self.data_points = data_points_ordered
def ComputeBandwidth(self):
"""Computes bandwidth averaged over several consecutive packets.
The number of consecutive packets used in the average is
BANDWIDTH_SMOOTHING_WINDOW_SIZE. Averaging is done with
numpy.correlate.
"""
start_ms = self.data_points[0].real_send_time_ms
stop_ms = self.data_points[-1].real_send_time_ms
(self.bandwidth_kbps, _) = numpy.histogram(
[point.real_send_time_ms for point in self.data_points],
bins=numpy.arange(start_ms, stop_ms,
RTPStatistics.PLOT_RESOLUTION_MS),
weights=[point.size * 8 / RTPStatistics.PLOT_RESOLUTION_MS
for point in self.data_points]
)
correlate_filter = (numpy.ones(
RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE) /
RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE)
self.smooth_bw_kbps = numpy.correlate(self.bandwidth_kbps, correlate_filter)
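  # Worked example of the weighting above (added comment): a 500-byte packet
  # contributes 500 * 8 / 50 = 80 kbps to its 50 ms histogram bin, so every
  # bin already holds an instantaneous bitrate; numpy.correlate with a
  # uniform window of length 10 then averages 10 consecutive bins, i.e.
  # smooths the bitrate over roughly half a second.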
def PlotStatistics(self):
"""Plots changes in delay and average bandwidth."""
start_ms = self.data_points[0].real_send_time_ms
stop_ms = self.data_points[-1].real_send_time_ms
time_axis = numpy.arange(start_ms / 1000, stop_ms / 1000,
RTPStatistics.PLOT_RESOLUTION_MS / 1000)
delay = CalculateDelay(start_ms, stop_ms,
RTPStatistics.PLOT_RESOLUTION_MS,
self.data_points)
plt.figure(1)
plt.plot(time_axis, delay[:len(time_axis)])
plt.xlabel("Send time [s]")
plt.ylabel("Relative transport delay [ms]")
plt.figure(2)
plt.plot(time_axis[:len(self.smooth_bw_kbps)], self.smooth_bw_kbps)
plt.xlabel("Send time [s]")
plt.ylabel("Bandwidth [kbps]")
plt.show()
def CalculateDelay(start, stop, step, points):
"""Quantizes the time coordinates for the delay.
Quantizes points by rounding the timestamps downwards to the nearest
point in the time sequence start, start+step, start+2*step... Takes
the average of the delays of points rounded to the same. Returns
masked array, in which time points with no value are masked.
"""
grouped_delays = [[] for _ in numpy.arange(start, stop + step, step)]
rounded_value_index = lambda x: int((x - start) / step)
for point in points:
grouped_delays[rounded_value_index(point.real_send_time_ms)
].append(point.absdelay)
regularized_delays = [numpy.average(arr) if arr else -1 for arr in
grouped_delays]
return numpy.ma.masked_values(regularized_delays, -1)
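# Small worked example (added comment): with start=0, stop=100, step=50 and
# two points sent at 10 ms and 30 ms with absdelay 4 and 6, both fall into
# the first 50 ms bin, which becomes (4 + 6) / 2 = 5; bins that receive no
# samples keep the placeholder -1 and come back masked in the returned array.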
def main():
usage = "Usage: %prog [options] <filename of rtc event log>"
parser = optparse.OptionParser(usage=usage)
parser.add_option("--dump_header_to_stdout",
default=False, action="store_true",
help="print header info to stdout; similar to rtp_analyze")
parser.add_option("--query_sample_rate",
default=False, action="store_true",
help="always query user for real sample rate")
parser.add_option("--working_directory",
default=None, action="store",
help="directory in which to search for relative paths")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(0)
input_file = args[0]
if options.working_directory and not os.path.isabs(input_file):
input_file = os.path.join(options.working_directory, input_file)
data_points = pb_parse.ParseProtobuf(input_file)
rtp_stats = RTPStatistics(data_points)
if options.dump_header_to_stdout:
print("Printing header info to stdout.", file=sys.stderr)
rtp_stats.PrintHeaderStatistics()
sys.exit(0)
chosen_ssrc = rtp_stats.ChooseSsrc()
print("Chosen SSRC: 0X{:X}".format(chosen_ssrc))
rtp_stats.FilterSsrc(chosen_ssrc)
print("Statistics:")
rtp_stats.PrintSequenceNumberStatistics()
rtp_stats.EstimateFrequency(options.query_sample_rate)
rtp_stats.PrintDurationStatistics()
rtp_stats.RemoveReordered()
rtp_stats.ComputeBandwidth()
rtp_stats.PlotStatistics()
if __name__ == "__main__":
main()
| apache-2.0 |
mcanthony/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtk.py | 69 | 43991 | from __future__ import division
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import matplotlib
from matplotlib import verbose
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
    flags = w.flags()
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def show(mainloop=True):
"""
Show all the figures and enter the gtk main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if mainloop and gtk.main_level() == 0 and \
len(Gcf.get_all_fig_managers())>0:
gtk.main()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTK(thisFig)
manager = FigureManagerGTK(canvas, num)
# equals:
#manager = FigureManagerGTK(FigureCanvasGTK(Figure(*args, **kwargs), num)
return manager
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
def destroy(self):
#gtk.DrawingArea.destroy(self)
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "hit", key
FigureCanvasBase.key_press_event(self, key)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "release", key
FigureCanvasBase.key_release_event(self, key)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval <256:
key = chr(event.keyval)
else:
key = None
ctrl = event.state & gdk.CONTROL_MASK
shift = event.state & gdk.SHIFT_MASK
return key
def configure_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
self.draw()
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
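    # Worked example of the growth policy above (added comment): a first
    # 400 px wide allocation creates a 400 px pixmap; growing to 405 px bumps
    # the stored width to max(int(400 * 1.1), 405) = 440, so later widenings
    # up to 440 px reuse that pixmap instead of reallocating on every resize.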
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format):
if self.flags() & gtk.REALIZED == 0:
            # realize the widget so self.window exists (needed for the pixmap);
            # this also has the side effect of altering the figure width/height
            # (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
if is_string_like(filename):
try:
pixbuf.save(filename, format)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def get_default_filetype(self):
return 'png'
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.window.set_title("Figure %d" % num)
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
self.toolbar.destroy()
self.__dict__.clear()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.png', 'home'),
('Back', 'Back to previous view','back.png', 'back'),
('Forward', 'Forward to next view','forward.png', 'forward'),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.png','pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.png', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.png', 'save_figure'),
)
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self._idle_draw_id = 0
def set_message(self, s):
if self._idle_draw_id == 0:
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._imageBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
try: lastrect, imageBack = self._imageBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._imageBack = axrect, drawable.get_image(*axrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
else:
def idle_draw(*args):
drawable.draw_image(gc, imageBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._init_toolbar2_4()
else:
self._init_toolbar2_2()
def _init_toolbar2_2(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.append_space()
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
w = self.append_item(text,
tooltip_text,
'Private',
image,
getattr(self, callback)
)
self.append_space()
self.message = gtk.Label()
self.append_widget(self.message, None, None)
self.message.show()
def _init_toolbar2_4(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win,)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class NavigationToolbar(gtk.Toolbar):
"""
Public attributes
canvas - the FigureCanvas (gtk.DrawingArea)
win - the gtk.Window
"""
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image, callback(str), callback_arg, scroll(bool)
toolitems = (
('Left', 'Pan left with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_BACK, 'panx', -1, True),
('Right', 'Pan right with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_FORWARD, 'panx', 1, True),
('Zoom In X',
'Zoom In X (shrink the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomx', 1, True),
('Zoom Out X',
'Zoom Out X (expand the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomx', -1, True),
(None, None, None, None, None, None,),
('Up', 'Pan up with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_UP, 'pany', 1, True),
('Down', 'Pan down with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_DOWN, 'pany', -1, True),
('Zoom In Y',
'Zoom in Y (shrink the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomy', 1, True),
('Zoom Out Y',
'Zoom Out Y (expand the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomy', -1, True),
(None, None, None, None, None, None,),
('Save', 'Save the figure',
gtk.STOCK_SAVE, 'save_figure', None, False),
)
def __init__(self, canvas, window):
"""
figManager is the FigureManagerGTK instance that contains the
toolbar, with attributes figure, window and drawingArea
"""
gtk.Toolbar.__init__(self)
self.canvas = canvas
# Note: gtk.Toolbar already has a 'window' attribute
self.win = window
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._create_toolitems_2_4()
self.update = self._update_2_4
self.fileselect = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
self._create_toolitems_2_2()
self.update = self._update_2_2
self.fileselect = FileSelection(title='Save the figure',
parent=self.win)
self.show_all()
self.update()
def _create_toolitems_2_4(self):
# use the GTK+ 2.4 GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
if callback_arg:
tbutton.connect('clicked', getattr(self, callback),
callback_arg)
else:
tbutton.connect('clicked', getattr(self, callback))
if scroll:
tbutton.connect('scroll_event', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
# Axes toolitem: empty at start; update() adds a menu if there are >= 2 axes
self.axes_toolitem = gtk.ToolItem()
self.insert(self.axes_toolitem, 0)
self.axes_toolitem.set_tooltip (
self.tooltips,
tip_text='Select axes that controls affect',
tip_private = 'Private')
align = gtk.Alignment (xalign=0.5, yalign=0.5, xscale=0.0, yscale=0.0)
self.axes_toolitem.add(align)
self.menubutton = gtk.Button ("Axes")
align.add (self.menubutton)
def position_menu (menu):
"""Function for positioning a popup menu.
Place menu below the menu button, but ensure it does not go off
the bottom of the screen.
The default is to popup menu at current mouse position
"""
x0, y0 = self.window.get_origin()
x1, y1, m = self.window.get_pointer()
x2, y2 = self.menubutton.get_pointer()
sc_h = self.get_screen().get_height() # requires GTK+ 2.2 +
w, h = menu.size_request()
x = x0 + x1 - x2
y = y0 + y1 - y2 + self.menubutton.allocation.height
y = min(y, sc_h - h)
return x, y, True
def button_clicked (button, data=None):
self.axismenu.popup (None, None, position_menu, 0,
gtk.get_current_event_time())
self.menubutton.connect ("clicked", button_clicked)
def _update_2_4(self):
# for GTK+ 2.4+
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
self.axismenu = self._make_axis_menu()
self.menubutton.show_all()
else:
self.menubutton.hide()
self.set_active(range(len(self._axes)))
def _create_toolitems_2_2(self):
# use the GTK+ 2.2 (and lower) GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.append_space()
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
item = self.append_item(text, tooltip_text, 'Private', image,
getattr(self, callback), callback_arg)
if scroll:
item.connect("scroll_event", getattr(self, callback))
self.omenu = gtk.OptionMenu()
self.omenu.set_border_width(3)
self.insert_widget(
self.omenu,
'Select axes that controls affect',
'Private', 0)
def _update_2_2(self):
# for GTK+ 2.2 and lower
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
# set up the axis menu
self.omenu.set_menu( self._make_axis_menu() )
self.omenu.show_all()
else:
self.omenu.hide()
self.set_active(range(len(self._axes)))
def _make_axis_menu(self):
# called by self._update*()
def toggled(item, data=None):
if item == self.itemAll:
for item in items: item.set_active(True)
elif item == self.itemInvert:
for item in items:
item.set_active(not item.get_active())
ind = [i for i,item in enumerate(items) if item.get_active()]
self.set_active(ind)
menu = gtk.Menu()
self.itemAll = gtk.MenuItem("All")
menu.append(self.itemAll)
self.itemAll.connect("activate", toggled)
self.itemInvert = gtk.MenuItem("Invert")
menu.append(self.itemInvert)
self.itemInvert.connect("activate", toggled)
items = []
for i in range(len(self._axes)):
item = gtk.CheckMenuItem("Axis %d" % (i+1))
menu.append(item)
item.connect("toggled", toggled)
item.set_active(True)
items.append(item)
menu.show_all()
return menu
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, button, direction):
'panx in direction'
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
return True
def pany(self, button, direction):
'pany in direction'
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
return True
def zoomx(self, button, direction):
'zoomx in direction'
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
return True
def zoomy(self, button, direction):
'zoomy in direction'
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
return True
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
if gtk.pygtk_version >= (2,4,0):
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = filetypes.items()
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
self.hide()
return filename, self.ext
else:
class FileSelection(gtk.FileSelection):
"""GTK+ 2.2 and lower file selector which remembers the last
file/directory selected
"""
def __init__(self, path=None, title='Select a file', parent=None):
super(FileSelection, self).__init__(title)
if path: self.path = path
else: self.path = os.getcwd() + os.sep
if parent: self.set_transient_for(parent)
def get_filename_from_user(self, path=None, title=None):
if path: self.path = path
if title: self.set_title(title)
self.set_filename(self.path)
filename = None
if self.run() == int(gtk.RESPONSE_OK):
self.path = filename = self.get_filename()
self.hide()
ext = None
if filename is not None:
ext = os.path.splitext(filename)[1]
if ext.startswith('.'):
ext = ext[1:]
return filename, ext
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
'called colorbutton marker clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0):
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
gtk.window_set_default_icon_from_file (
os.path.join (matplotlib.rcParams['datapath'], 'images', icon_filename))
except:
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureManager = FigureManagerGTK
| agpl-3.0 |
RPGOne/Skynet | imbalanced-learn-master/examples/under-sampling/plot_condensed_nearest_neighbour.py | 3 | 1965 | """
===========================
Condensed nearest-neighbour
===========================
An illustration of the condensed nearest-neighbour method.
"""
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Define some color for the plotting
almost_black = '#262626'
palette = sns.color_palette()
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.under_sampling import CondensedNearestNeighbour
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=5000, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply Condensed Nearest Neighbours
cnn = CondensedNearestNeighbour()
X_resampled, y_resampled = cnn.fit_sample(X, y)
X_res_vis = pca.transform(X_resampled)
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
label="Class #0", alpha=.5, edgecolor=almost_black,
facecolor=palette[0], linewidth=0.15)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
label="Class #1", alpha=.5, edgecolor=almost_black,
facecolor=palette[2], linewidth=0.15)
ax2.set_title('Condensed nearest neighbour')
plt.show()
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/mixture/gmm.py | 68 | 31091 | """
Gaussian Mixture Models.
This implementation corresponds to the frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state : RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
the prediction may not be 100% accurate when only a few iterations are run.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithm
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check that at least one initialization was not subject to likelihood
# computation issues.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
EmanuelaBoros/kaggle-national-data-science-bowl- | ndsbkaggle/gen_train.py | 1 | 1994 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
gen_train <dir> <labels.txt>
gen_train -h | --help
Options:
-h, --help Show help
"""
from __future__ import print_function
import docopt
import numpy
import utils
import h5py
import features.manual as manual
import features.bow_sift as bow_sift
import sklearn.pipeline as pipeline
def load_train(path):
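"""Collect (label, image_path) pairs for every training image found under
`path`; the label is derived from the image path via utils.category_from_path."""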
# Gather paths
paths = list(utils.iter_paths(path))
train_set = []
for train_path in paths:
label = utils.category_from_path(train_path)
train_set.append((label, train_path))
return train_set
def main():
args = docopt.docopt(__doc__)
# Load labels
id2label = []
label2id = {}
with open(args['<labels.txt>'], 'r') as labels_file:
id2label = labels_file.read().split()
for i, label in enumerate(id2label):
label2id[label] = i
# Load path and categories for train dataset
train_set = load_train(args['<dir>'])
if len(train_set) == 0:
return
# Feature extractor
extractor = pipeline.FeatureUnion([
('BowSift', bow_sift.BowSift(n_features=300)),
# ('Manual', manual.ManualFeatures())
])
print("# Read images")
labels, images = zip(*[(l, utils.load_image(p)) for (l, p) in train_set])
print("# Extract features")
features = extractor.fit_transform(images)
n_sample = len(images)
n_features = features.shape[1]
# Create dataset
with h5py.File('train_set.hdf5', 'w', driver='core') as f:
# X = training data
f.create_dataset(
'X',
(n_sample, n_features),
dtype='f',
data=features)
# Y = labels
f.create_dataset(
'Y',
(n_sample,),
dtype='i',
data=[label2id[label] for label in labels])
# Dump features extractor
utils.dump_classifier(extractor, 'feature_extractor.pkl')
if __name__ == "__main__":
main()
| bsd-3-clause |
TiKeil/Master-thesis-LOD | python_files/generate_figures/8.3-8.4_ErrorIndicator.py | 1 | 7653 | # This file is part of the master thesis "Variational crimes in the Localized orthogonal decomposition method":
# https://github.com/TiKeil/Masterthesis-LOD.git
# Copyright holder: Tim Keil
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import matplotlib.pyplot as plt
from gridlod import interp, coef, util, fem, world, linalg
from gridlod.world import World
import femsolverCoarse
import pg_rand
import buildcoef2d
from visualize import drawCoefficient
def result(pglod, world, A, R, f, k, String):
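# Note: builds a fine-scale coefficient from the perturbed field R, queries the
# precomputed PG-LOD correctors for their error indicators eps = e_{u,T}
# (Computing = False, so no corrector is actually recomputed), and plots the
# indicator per coarse element under the legend label `String`.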
print "------------------------------------- " + String + " -------------------------------------------"
NWorldFine = world.NWorldFine
NWorldCoarse = world.NWorldCoarse
NCoarseElement = world.NCoarseElement
boundaryConditions = world.boundaryConditions
NpFine = np.prod(NWorldFine+1)
NpCoarse = np.prod(NWorldCoarse+1)
#new Coefficient
ANew = R.flatten()
Anew = coef.coefficientFine(NWorldCoarse, NCoarseElement, ANew)
# tolerance = 0
vis, eps = pglod.updateCorrectors(Anew, 0, f, 1, Computing = False)
elemente = np.arange(np.prod(NWorldCoarse))
plt.figure("Error indicators")
plt.plot(elemente,eps,label=String)
plt.ylabel('$e_{u,T}$')
plt.xlabel('Element')
plt.subplots_adjust(left=0.09,bottom=0.09,right=0.99,top=0.99,wspace=0.2,hspace=0.2)
plt.legend(loc='upper right') # legend
plt.grid()
bg = 0.05 #background
val = 1 #values
#fine World
NWorldFine = np.array([256, 256])
NpFine = np.prod(NWorldFine+1)
#coarse World
NWorldCoarse = np.array([16,16])
NpCoarse = np.prod(NWorldCoarse+1)
#ratio between Fine and Coarse
NCoarseElement = NWorldFine/NWorldCoarse
boundaryConditions = np.array([[0, 0],
[0, 0]])
world = World(NWorldCoarse, NCoarseElement, boundaryConditions)
#righthandside
f = np.ones(NpCoarse)
#coefficient
CoefClass = buildcoef2d.Coefficient2d(NWorldFine,
bg = 0.05,
val = 1,
length = 8,
thick = 8,
space = 8,
probfactor = 1,
right = 1,
down = 0,
diagr1 = 0,
diagr2 = 0,
diagl1 = 0,
diagl2 = 0,
LenSwitch = None,
thickSwitch = None,
equidistant = True,
ChannelHorizontal = None,
ChannelVertical = None,
BoundarySpace = True)
A = CoefClass.BuildCoefficient()
ABase = A.flatten()
plt.figure("OriginalCoefficient")
drawCoefficient(NWorldFine, ABase)
numbers = [2,70,97,153,205]
value1 = 3
R1 = CoefClass.SpecificValueChange( ratio = value1,
Number = numbers,
probfactor = 1,
randomvalue = None,
negative = None,
ShapeRestriction = True,
ShapeWave = None,
Original = True)
plt.figure("Change in value to 3")
drawCoefficient(NWorldFine, R1)
value2 = 50
R2 = CoefClass.SpecificValueChange( ratio = value2,
Number = numbers,
probfactor = 1,
randomvalue = None,
negative = None,
ShapeRestriction = True,
ShapeWave = None,
Original = True)
plt.figure("Change in value to 50")
drawCoefficient(NWorldFine, R2)
D = CoefClass.SpecificVanish( Number = numbers,
probfactor = 1,
PartlyVanish = None,
Original = True)
plt.figure("Disappearance")
drawCoefficient(NWorldFine, D)
E2 = CoefClass.SpecificMove( Number = numbers,
steps = 3,
randomstep = None,
randomDirection = None,
Right = 0,
BottomRight = 0,
Bottom = 0,
BottomLeft = 0,
Left = 0,
TopLeft = 1,
Top = 0,
TopRight = 0,
Original = True)
plt.figure("Shift one step")
drawCoefficient(NWorldFine, E2)
E3 = CoefClass.SpecificMove( Number = numbers,
steps = 7,
randomstep = None,
randomDirection = None,
Right = 0,
BottomRight = 0,
Bottom = 0,
BottomLeft = 0,
Left = 0,
TopLeft = 1,
Top = 0,
TopRight = 0,
Original = True)
plt.figure("Shift two steps")
drawCoefficient(NWorldFine, E3)
# precomputations
NWorldFine = world.NWorldFine
NWorldCoarse = world.NWorldCoarse
NCoarseElement = world.NCoarseElement
boundaryConditions = world.boundaryConditions
NpFine = np.prod(NWorldFine+1)
NpCoarse = np.prod(NWorldCoarse+1)
#interpolant
IPatchGenerator = lambda i, N: interp.L2ProjectionPatchMatrix(i, N, NWorldCoarse, NCoarseElement, boundaryConditions)
#old Coefficient
Aold = coef.coefficientFine(NWorldCoarse, NCoarseElement, ABase)
k = 5
pglod = pg_rand.VcPetrovGalerkinLOD(Aold, world, k, IPatchGenerator, 0)
pglod.originCorrectors(clearFineQuantities=False)
# Change in value 1
result(pglod ,world, A, R1, f, k, 'Change in value to' + str(value1))
# Change in value 2
result(pglod ,world, A, R2, f, k, 'Change in value to' + str(value2))
# Disappearance
result(pglod, world, A, D, f, k, 'Disappearance')
# Shift one step
result(pglod, world, A, E2, f, k, 'Shift one step')
# Shift two steps
result(pglod, world, A, E3, f, k, 'Shift two steps')
plt.show() | apache-2.0 |
phamngtuananh/Singaboat_RobotX2016 | robotx_nav/nodes/task1_toplevel.py | 3 | 9161 | #!/usr/bin/env python
""" task 1:
-----------------
Created by Ren Ye @ 2016-11-06
Authors: Ren Ye, Reinaldo
-----------------
<put the descriptions from robotx.org pdf file>
<put the algorithms in natural language, can use bullet points, best is to use markdown format>
<if you have plan b, can put it here>
## example ##
+ Go to start point
+ Rotate in position to detect red_1 and green_1 buoys
+ Plot perpendicular waypoints with respect to the positions of the red and green buoys
+ Move towards the waypoints with move_base_forward
+ meanwhile request the positions of red_2 and green_2
+ shut down move_base_forward, create a new move_base_forward towards the midpoint of red_2 and green_2
<change log put here>
### @ 2016-11-06 ###
+ create template
renye's approach:
1. drive to the gps waypoint
2. slowly rotate in place  # not needed
3. detect the red and green totems with any camera
4. rotate the bow towards the red and green totems
5. ROI of red in bow/left and ROI of green in bow/right, calculate the center
6. drive until the ROI vanishes from both bow cameras, then detect the totems from port and starboard
7. see the new ROI from the bow
8. drive using steps 5 and 6
reinaldo's approach:
1. fill the bucket of marker positions until it is full
2. do k-means clustering to differentiate the same-colored totems
3. get the closest red/green pair
4. plan based on that pair, replan if the new plan is far from the old one
5. loop back to 2.
6. terminate if the displacement from start to end > termination_distance
"""
import rospy
import multiprocessing as mp
import math
import time
import numpy as np
import os
import tf
from sklearn.cluster import KMeans
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from move_base_forward import Forward
from move_base_force_cancel import ForceCancel
from tf.transformations import euler_from_quaternion
def constant_heading(goal):
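# Spawns a Forward controller node that drives towards `goal` (absolute map
# coordinates, is_relative=False) on a constant heading; it is launched through
# the multiprocessing pool and killed again via cancel_forward().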
constant_obj = Forward(nodename="constant_heading", target=goal, waypoint_separation=5, is_relative=False)
def cancel_forward():
os.system('rosnode kill constant_heading')
class PassGates(object):
pool = mp.Pool()
x0, y0, yaw0= 0, 0, 0
MAX_DATA=30
markers_array=MarkerArray()
red_totem=np.zeros((MAX_DATA, 2)) #unordered list
green_totem=np.zeros((MAX_DATA, 2))
red_centers=np.zeros((2, 2)) #ordered list of centers x, y
green_centers=np.zeros((2, 2))
red_position=np.zeros((2, 2)) #ordered list of centers x, y
green_position=np.zeros((2, 2))
red_counter=0
green_counter=0
replan_min=5
termination_displacement=60
def __init__(self):
print("starting task 1")
rospy.init_node('task_1', anonymous=True)
rospy.Subscriber("/filtered_marker_array", MarkerArray, self.marker_callback, queue_size = 50)
self.marker_pub= rospy.Publisher('waypoint_markers', Marker, queue_size=5)
self.odom_received = False
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
print("odom received")
init_position =np.array([self.x0, self.y0, 0])
prev_target=np.array([self.x0, self.y0, 0])
while(self.red_counter<self.MAX_DATA and self.green_counter<self.MAX_DATA):
#wait for data bucket to fill up
time.sleep(1)
print("bucket full")
while not rospy.is_shutdown():
self.matrix_reorder()
print("reorder complete")
target = self.plan_waypoint()
print(target)
if self.euclid_distance(target, prev_target)>self.replan_min:
#replan
#force cancel
self.pool.apply(cancel_forward)
#plan new constant heading
print("replan")
self.pool.apply_async(constant_heading, args = (target, ))
prev_target=target
else:
pass
#termination condition
if self.euclid_distance(np.array([self.x0, self.y0, 0]), init_position)>self.termination_displacement:
self.pool.apply(cancel_forward)
print("Task 1 Completed")
break
time.sleep(1)
self.pool.close()
self.pool.join()
def plan_waypoint(self):
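# Choose the red and the green cluster centre closest to the boat, take the
# midpoint of that pair as the gate centre and return a target pose `distance`
# metres from it along the chosen heading; if the two totems are less than
# 20 m apart the heading is perpendicular to the red-green line, otherwise a
# skewed fallback heading is used.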
distance=20
dis_red=1000
dis_green=1000
#find closest available totem pairs
for m in self.red_position:
if self.distance_from_boat(m) < dis_red:
nearest_red=m
dis_red=self.distance_from_boat(m)
for n in self.green_position:
if self.distance_from_boat(n) < dis_green:
nearest_green=n
dis_green=self.distance_from_boat(n)
#plan
dis=nearest_red-nearest_green
[x_center, y_center]=[(nearest_red[0]+nearest_green[0])/2, (nearest_red[1]+nearest_green[1])/2]
if math.sqrt(dis.dot(dis.T)) <20:
theta=math.atan2(math.sin(math.atan2(nearest_green[1]-nearest_red[1], nearest_green[0]-nearest_red[0])+math.pi/2), math.cos(math.atan2(nearest_green[1]-nearest_red[1], nearest_green[0]-nearest_red[0])+math.pi/2))
#theta = math.atan2(nearest_green[1]-nearest_red[1], nearest_green[0]-nearest_red[0])+math.pi/2
else:
theta = math.atan2(nearest_green[1]-nearest_red[1], nearest_green[0]-nearest_red[0])+math.atan2(10,30)
return np.array([x_center+distance*math.cos(theta), y_center+distance*math.sin(theta), theta])
def distance_from_boat(self, target):
return math.sqrt((target[0]-self.x0)**2+(target[1]-self.y0)**2)
def euclid_distance(self, target1, target2):
return math.sqrt((target1[0]-target2[0])**2+(target1[1]-target2[1])**2)
def is_complete(self):
pass
def marker_callback(self, msg):
if len(msg.markers)>0:
for i in range(len(msg.markers)):
if msg.markers[i].type == 3:
#may append more than 1 markers
if msg.markers[i].id == 0:
self.red_totem[self.red_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.red_counter+=1
elif msg.markers[i].id == 1:
self.green_totem[self.green_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.green_counter+=1
else:
pass
# list is full
if (self.red_counter>self.MAX_DATA):
red_kmeans = KMeans(n_clusters=2).fit(self.red_totem)
self.red_centers=red_kmeans.cluster_centers_
if(self.green_counter>self.MAX_DATA):
green_kmeans = KMeans(n_clusters=2).fit(self.green_totem)
self.green_centers=green_kmeans.cluster_centers_
#visualize markers in rviz
for i in range(len(msg.markers)):
self.marker_pub.publish(msg.markers[i])
def matrix_reorder(self):
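# Order the two k-means centres of each colour by their squared distance from
# the map origin, so that red_position[0] / green_position[0] always hold the
# nearer centre of the pair.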
if self.red_centers[0].dot(self.red_centers[0].T)< self.red_centers[1].dot(self.red_centers[1].T):
self.red_position=self.red_centers
else:
self.red_position[0]=self.red_centers[1]
self.red_position[1]=self.red_centers[0]
if self.green_centers[0].dot(self.green_centers[0].T)< self.green_centers[1].dot(self.green_centers[1].T):
self.green_position=self.green_centers
else:
self.green_position[0]=self.green_centers[1]
self.green_position[1]=self.green_centers[0]
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
if __name__ == '__main__':
try:
PassGates()
# stage 1: gps
except rospy.ROSInterruptException:
rospy.loginfo("Task 1 Finished")
| gpl-3.0 |
griffinfoster/pulsar-polarization-sims | scripts/plotValvsIXR.py | 1 | 11154 | #!/usr/bin/env python
"""
"""
import os,sys
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import pylab as p
import cPickle as pkl
from scipy import interpolate
matplotlib.rc('xtick',labelsize=25)
matplotlib.rc('ytick',labelsize=25)
modeTitle=['Total Intensity','Invariant Interval','Matrix Template Matching']
fs=27 #fontsize
import numpy
def smooth(x,window_len=31,window='hanning'):
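"""Smooth a 1-D signal by convolving it with a window of the requested type.
The input is padded at both ends with reflected copies of itself, so the
returned array is longer than the input by window_len - 1 samples.
Illustrative example (hypothetical data, not taken from the simulations):
>>> y = smooth(numpy.linspace(-2., 2., 50), window_len=11)
>>> y.shape
(60,)
"""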
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if x.size < window_len:
raise ValueError, "Input vector needs to be bigger than window size."
if window_len<3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=numpy.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=numpy.ones(window_len,'d')
else:
w=eval('numpy.'+window+'(window_len)')
y=numpy.convolve(w/w.sum(),s,mode='valid')
return y
if __name__ == "__main__":
from optparse import OptionParser
o = OptionParser()
o.set_usage('%prog [options] [pklReduceDict.py DICT]')
o.set_description(__doc__)
o.add_option('-l','--leak',dest='leak', action='store_true',
help='Plot in terms of polarization leakage instead of IXR')
o.add_option('-s', '--savefig', dest='savefig', default=None,
help='Save figure in a format based on name extension')
o.add_option('-S','--show',dest='show', action='store_true',
help='Show the plot')
o.add_option('--snr',dest='snr',default=100,type='int',
help='SNR value to use (rounds to nearest int value), default: 100')
o.add_option('--info',dest='info',action='store_true',
help='Print parameter information in the dictionary and exit')
o.add_option('--lines',dest='lines',action='store_true',
help='Instead of fill plots, plot the individual lines')
o.add_option('--ixrlines',dest='ixrlines',action='store_true',
help='Instead of fill plots, plot the individual IXR lines')
o.add_option('--dJmax',dest='dJmax',default=0.15,type='float',
help='Maximum calibration error to use, default: 0.15')
opts, args = o.parse_args(sys.argv[1:])
print 'Loading PKL file'
reduceDict=pkl.load(open(args[0]))
if opts.info:
snrs=[]
deltaJs=[]
ixrs=[]
for key,val in reduceDict.iteritems():
snrs.append(key[1])
deltaJs.append(key[2]*100.)
ixrs.append(10.*np.log10(1./(key[3]**2)))
snrs=np.array(snrs)
deltaJs=np.array(deltaJs)
ixrs=np.array(ixrs)
print 'SNR:', np.unique(snrs)
print 'delta J (\%):',np.unique(deltaJs)
print 'IXR (dB):', np.unique(ixrs)
exit()
#Total Intensity
ixrdbs0=[]
polLeakdbs0=[]
deltaJs0=[]
rmsVals0=[]
#Invariant Interval
ixrdbs1=[]
polLeakdbs1=[]
deltaJs1=[]
rmsVals1=[]
#MTM cal
ixrdbs2cal=[]
polLeakdbs2cal=[]
deltaJs2cal=[]
rmsVals2cal=[]
#MTM uncal
ixrdbs2uncal=[]
polLeakdbs2uncal=[]
deltaJs2uncal=[]
rmsVals2uncal=[]
for key,val in reduceDict.iteritems():
if int(key[1])==opts.snr and key[2] < opts.dJmax: #SNR mode and dJ max selection
deltaJ=key[2]*100.
polLeakdb=10.*np.log10((key[3]**2))
ixrdb=10.*np.log10(1./(key[3]**2))
if key[0]==0 and key[4].startswith('cal'):
ixrdbs0.append(ixrdb)
deltaJs0.append(deltaJ)
polLeakdbs0.append(polLeakdb)
rmsVals0.append(val['rms'])
#rmsVals0.append(val['chi2'])
#rmsVals0.append(val['avgSigma'])
elif key[0]==1 and key[4].startswith('cal'):
ixrdbs1.append(ixrdb)
deltaJs1.append(deltaJ)
polLeakdbs1.append(polLeakdb)
rmsVals1.append(val['rms'])
#rmsVals1.append(val['chi2'])
#rmsVals1.append(val['avgSigma'])
elif key[0]==2:
if key[4].startswith('cal'):
ixrdbs2cal.append(ixrdb)
deltaJs2cal.append(deltaJ)
polLeakdbs2cal.append(polLeakdb)
rmsVals2cal.append(val['rms'])
else:
ixrdbs2uncal.append(ixrdb)
deltaJs2uncal.append(deltaJ)
polLeakdbs2uncal.append(polLeakdb)
rmsVals2uncal.append(val['rms'])
def fillPlotter(ixrdbs,polLeakdbs,deltaJs,rmsVals):
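        # Collapse the scattered (dJ, leakage) RMS samples into per-leakage
        # min/max envelopes, then interpolate and Hanning-smooth them so they
        # can be drawn as filled bands.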
ixrdbs=np.array(ixrdbs)
polLeakdbs=np.array(polLeakdbs)
deltaJs=np.array(deltaJs)
rmsVals=np.array(rmsVals)
polLeakVals=np.unique(polLeakdbs)
sortIdx=np.argsort(polLeakVals)
avgRMS=np.zeros_like(polLeakVals)
minRMS=np.zeros_like(polLeakVals)
maxRMS=np.zeros_like(polLeakVals)
for pid,pval in enumerate(polLeakVals):
rmsValsPolLeak=[]
for pid0,pval0 in enumerate(polLeakdbs):
if pval==pval0: rmsValsPolLeak.append(rmsVals[pid0])
avgRMS[pid]=np.average(np.array(rmsValsPolLeak))
minRMS[pid]=np.min(np.array(rmsValsPolLeak))
maxRMS[pid]=np.max(np.array(rmsValsPolLeak))
polLeakVals=polLeakVals[sortIdx]
maxRMS=maxRMS[sortIdx]
minRMS=minRMS[sortIdx]
#interpolate to smooth out curves
fmax=interpolate.interp1d(polLeakVals,maxRMS,kind='linear')
fmin=interpolate.interp1d(polLeakVals,minRMS,kind='linear')
interpPolLeak=np.linspace(polLeakVals[0],polLeakVals[-1],400)
window_len=31
return interpPolLeak,smooth(fmax(interpPolLeak),window_len=window_len)[:-30],smooth(fmin(interpPolLeak),window_len=window_len)[:-30]
def linePlotter(ixrdbs,polLeakdbs,deltaJs,rmsVals):
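        # Group the RMS values into one curve per calibration-error value dJ,
        # each sorted by polarization leakage, for plotting as individual lines.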
ixrdbs=np.array(ixrdbs)
polLeakdbs=np.array(polLeakdbs)
deltaJs=np.array(deltaJs)
rmsVals=np.array(rmsVals)
deltaJVals=np.unique(deltaJs)
rmsLines=[]
polLeakLines=[]
for dJ in deltaJVals:
idx=np.argwhere(deltaJs==dJ)
subPolLeak=polLeakdbs[idx][:,0]
subRmsVals=rmsVals[idx][:,0]
sortIdx=np.argsort(subPolLeak)
rmsLines.append(subRmsVals[sortIdx])
polLeakLines.append(subPolLeak[sortIdx])
return polLeakLines,rmsLines,deltaJVals
def ixrLinePlotter(ixrdbs,polLeakdbs,deltaJs,rmsVals):
ixrdbs=np.array(ixrdbs)
polLeakdbs=np.array(polLeakdbs)
deltaJs=np.array(deltaJs)
rmsVals=np.array(rmsVals)
polLeakVals=np.unique(polLeakdbs)
rmsLines=[]
dJLines=[]
for pid,pval in enumerate(polLeakVals):
idx=np.argwhere(polLeakdbs==pval)
subdJs=deltaJs[idx][:,0]
subRmsVals=rmsVals[idx][:,0]
sortIdx=np.argsort(subdJs)
rmsLines.append(subRmsVals[sortIdx])
dJLines.append(subdJs[sortIdx])
return dJLines,rmsLines,polLeakVals
fig=p.figure()
fig.set_size_inches(9.,6.)
ax=fig.add_subplot(1,1,1)
if opts.lines:
polLeakLines,rmsLines,deltaJVals=linePlotter(ixrdbs0,polLeakdbs0,deltaJs0,rmsVals0)
#color setup
cVals=np.log10(np.array(rmsLines)[:,0])
cVals-=np.min(cVals)
cVals/=np.max(cVals)
textStep=0.
for pid,lid,did,cid in zip(polLeakLines,rmsLines,deltaJVals,cVals):
if did==23.0: continue
p.plot(pid,lid,color=(cid,0.,1-cid))
if textStep%2==0: p.text(pid[int(pid.shape[0]*.4)]-textStep,lid[int(lid.shape[0]*.4)],'%0.f%%'%did,verticalalignment='center')
textStep+=1.
p.xlim(-30,0)
elif opts.ixrlines:
dJLines,rmsLines,polLeakVals=ixrLinePlotter(ixrdbs0,polLeakdbs0,deltaJs0,rmsVals0)
#color setup
cVals=np.log10(np.array(rmsLines)[:,-1])
cVals-=np.min(cVals)
cVals/=np.max(cVals)
for djid,lid,pid,cid in zip(dJLines,rmsLines,polLeakVals,cVals):
if pid > -1. and pid < -0.1: #hack
templid=lid
templid[-2]=(templid[-3]+templid[-1])/2.
p.plot(djid,templid,color=(cid,0.,1-cid))
else: p.plot(djid,lid,color=(cid,0.,1-cid))
if pid > -0.1: lblStr='%0.f dB'%(-1.*pid)
elif (pid >-30. and pid < -11) or pid < -31: lblStr=''
else: lblStr='%0.f dB'%(pid)
p.text(djid[int(djid.shape[0]*.25)],lid[int(lid.shape[0]*.25)],lblStr,verticalalignment='center')
p.gca().invert_xaxis()
else:
polLeakVals,maxRMS0,minRMS0=fillPlotter(ixrdbs0,polLeakdbs0,deltaJs0,rmsVals0)
p.fill_between(polLeakVals,maxRMS0,y2=minRMS0,edgecolor='none',facecolor=(1.,0.5,0.5,1.))
#p.fill_between(polLeakVals,maxRMS0,y2=minRMS0,edgecolor='black',linestyle='solid',facecolor=(1.,0.5,0.5,1.))
polLeakVals,maxRMS1,minRMS1=fillPlotter(ixrdbs1,polLeakdbs1,deltaJs1,rmsVals1)
#p.fill_between(polLeakVals,maxRMS1,y2=minRMS1,edgecolor='none',facecolor=(0.5,0.5,0.5,.9))
p.fill_between(polLeakVals,maxRMS1,y2=minRMS1,edgecolor='black',linestyle='dashed',facecolor=(0.5,0.5,0.5,.9))
#p.fill_between(polLeakVals,maxRMS1,y2=minRMS1,edgecolor='none',facecolor=(0.5,0.5,0.5,.9), hatch='+')
polLeakVals,maxRMS2,minRMS2=fillPlotter(ixrdbs2cal,polLeakdbs2cal,deltaJs2cal,rmsVals2cal)
#p.fill_between(polLeakVals,maxRMS2,y2=minRMS2,edgecolor='none',facecolor=(1.,0.73,0.25,.9))
p.fill_between(polLeakVals,maxRMS2,y2=minRMS2,edgecolor='black',linestyle='dotted',facecolor=(1.,0.73,0.25,.9))
#p.fill_between(polLeakVals,maxRMS2,y2=minRMS2,edgecolor='none',facecolor=(1.,0.73,0.25,.9), hatch='x')
polLeakVals,maxRMS3,minRMS3=fillPlotter(ixrdbs2uncal,polLeakdbs2uncal,deltaJs2uncal,rmsVals2uncal)
#p.fill_between(polLeakVals,maxRMS3,y2=minRMS3,edgecolor='none',facecolor=(0.5,0.63,1.,.9))
p.fill_between(polLeakVals,maxRMS3,y2=minRMS3,edgecolor='black',linestyle='dashdot',facecolor=(0.5,0.63,1.,.9))
#p.fill_between(polLeakVals,maxRMS3,y2=minRMS3,edgecolor='none',facecolor=(0.5,0.63,1.,.9), hatch='/')
p.xlim(-30,0)
if opts.leak: p.xlabel('polarization leakage (dB)',fontsize=fs)
else: p.xlabel('IXR (dB)',fontsize=fs)
p.ylabel('rms ($\mu$s)',fontsize=fs)
if opts.ixrlines: p.xlabel('Calibration Error (%)',fontsize=fs)
ax.set_yscale('log')
#maxPolLeak=-3.
#p.xlim(np.min(np.array(polLeakVals)),np.max(np.array(polLeakVals))+maxPolLeak)
#minIdx=(np.abs(np.array(polLeakVals)-maxPolLeak)).argmin()
#maxIdx=np.array(polLeakVals).argmin()
#p.ylim(min(minRMS1[maxIdx],minRMS2[maxIdx],minRMS3[maxIdx])-.1,max(maxRMS1[minIdx],maxRMS2[minIdx],maxRMS3[minIdx]))
#p.xlim()
#p.ylim()
p.gca().invert_xaxis()
if opts.show: p.show()
if not(opts.savefig is None):
p.savefig(opts.savefig, bbox_inches='tight')
| mit |
mjudsp/Tsallis | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
xyguo/scikit-learn | examples/svm/plot_svm_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.hold('on')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
Xeralux/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 30 | 70017 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
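    # (Natural log: 0.25*ln(4) + 0.75*ln(4/3) ~= 0.3466 + 0.2158 ~= 0.562.)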
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
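    # (logodds(p) = ln(p / (1 - p)), so logodds(0.75) = ln(3) ~= 1.0986.)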
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
      # Expected is 100, but because of the global step increment bug, it is 50.
# Occasionally, step increments one more time due to a race condition,
# reaching 51 steps.
self.assertIn(step_counter.steps, [50, 51])
else:
# Occasionally, training stops when global_step == 102, due to a race
# condition. In addition, occasionally step increments one more time due
# to a race condition reaching 52 steps.
self.assertIn(step_counter.steps, [51, 52])
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
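      # A trivial custom metric: the sum of the element-wise products of
      # predictions and labels.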
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
i-Zaak/cyclos_networks | cyclos_networks/stats.py | 2 | 7853 | import matplotlib.pyplot as plt
import numpy as np
import filter as letsf
from sets import Set
def account_ballance(trans):
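    """Build the running balance history of every account from the transaction table.

    Each row of trans[:, (1, 2, 4, 5)] is (from_id, to_id, date string, amount);
    the result maps each account id to ([balances], [days since the first transaction]).
    """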
import datetime as dt
import collections
bals = {}
first_date = None
last_day = 0
    for row in trans[:,(1,2,4,5)]:
        from_id, to_id, datestr, brks = row
date = dt.datetime.strptime(datestr[:10], '%Y-%m-%d').date()
brks = float(brks)
if first_date is None:
first_date = date
if bals.has_key(from_id):
bals[from_id][0].append(bals[from_id][0][-1] - brks)
else:
bals[from_id] = ([],[]) # (ballance,day)
bals[from_id][0].append(-brks)
bals[from_id][1].append((date-first_date).days)
if bals.has_key(to_id):
bals[to_id][0].append(bals[to_id][0][-1] + brks)
else:
bals[to_id] = ([],[])
bals[to_id][0].append(brks)
bals[to_id][1].append((date-first_date).days)
last_day = bals[to_id][1][-1]
for bal in bals:
bals[bal][0].append(bals[bal][0][-1])
bals[bal][1].append(last_day)
return bals
def plot_bals(bals):
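    """Plot each account's balance as a step curve; clicking a curve or its legend
    entry toggles that account's visibility."""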
fig = plt.figure()
ax = fig.add_subplot(111)
lines = []
for bal in bals:
line, = ax.step(bals[bal][1],bals[bal][0],where='post',label=bal)
lines.append(line)
leg = ax.legend(loc='upper left')
lined = dict()
lined2 = dict()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
origline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
lined2[origline] = legline
def onpick(event):
print event
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
if lined.has_key(event.artist):
legline = event.artist
origline = lined[legline]
else:
origline = event.artist
legline = lined2[origline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
fig.canvas.draw()
fig.canvas.mpl_connect('pick_event', onpick)
plt.show()
def plot_bals_matrix(bals,filename='analyza/balances.png'):
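    """Draw every account's balance curve in its own subplot of a fixed grid and save the figure."""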
fig = plt.figure(figsize=(20, 42), dpi=100)
#splot = [19,5,0]
#splot = [47,2,0]
splot = [31,3,0]
mx = 0
miny = 0
maxy = 0
for i in bals:
n = bals[i][1][-1]
l = bals[i][0][-1]
if n > mx:
mx = n
if miny > l:
miny = l
if maxy < l:
maxy = l
for bal in sorted(bals.keys(), key=lambda item: int(item)):
splot[2] = splot[2] + 1
ax = fig.add_subplot(splot[0], splot[1], splot[2])
ax.step(bals[bal][1],bals[bal][0],where='post',label=bal)
ax.set_xlim([0,mx])
ax.set_ylim([miny,maxy])
ax.axhline(linewidth=1, color='black')
leg = ax.legend(loc='upper left')
plt.savefig(filename)
def plot_bals_diff_matrix(bals1, bals2, filename='analyza/balances_diff.png'):
fig = plt.figure(figsize=(20, 42), dpi=100)
#splot = [19,5,0]
#splot = [47,2,0]
splot = [31,3,0]
mx = 0
miny = 0
maxy = 0
for i in bals1:
n = bals1[i][1][-1]
l = bals1[i][0][-1]
if n > mx:
mx = n
if miny > l:
miny = l
if maxy < l:
maxy = l
for i in bals2:
n = bals2[i][1][-1]
l = bals2[i][0][-1]
if n > mx:
mx = n
if miny > l:
miny = l
if maxy < l:
maxy = l
for bal in sorted(bals1.keys(), key=lambda item: int(item)):
if not bals2.has_key(bal):
continue
splot[2] = splot[2] + 1
ax = fig.add_subplot(splot[0], splot[1], splot[2])
ax.step(bals1[bal][1],bals1[bal][0],where='post',label=bal,color='blue')
ax.step(bals2[bal][1],bals2[bal][0],where='post',label=bal+"'", color='red')
ax.set_xlim([0,mx])
ax.set_ylim([miny,maxy])
ax.axhline(linewidth=1, color='black')
leg = ax.legend(loc='upper left')
plt.savefig(filename)
def plot_bals_diffs(bals1, bals2, nodes, filename='analyza/ballances_33.pdf'):
#fig = plt.figure(figsize=(8, 4), dpi=100)
fig = plt.figure(figsize=(8, 4))
nrows = np.ceil(len(nodes)/2.0)
mx = 0
miny = 0
maxy = 0
for i in bals1:
n = bals1[i][1][-1]
l = bals1[i][0][-1]
if n > mx:
mx = n
if miny > l:
miny = l
if maxy < l:
maxy = l
for i in bals2:
n = bals2[i][1][-1]
l = bals2[i][0][-1]
if n > mx:
mx = n
if miny > l:
miny = l
if maxy < l:
maxy = l
for i,bal in enumerate(nodes):
        ax = fig.add_subplot(nrows, 2, i+1)  # subplot numbering is 1-based
ax.plot(bals1[bal][1],bals1[bal][0],label=bal,linestyle='--',color='blue', drawstyle='steps-post', linewidth=3)
ax.plot(bals2[bal][1],bals2[bal][0],label=bal+"'", linestyle='-.',color='red', drawstyle='steps-post', linewidth=3)
ax.set_xlim([0,mx])
#ax.set_ylim([miny,maxy])
ax.axhline(linewidth=1, color='black')
#leg = ax.legend(loc='lower left')
ax.set_title('account ' + bal)
ax.set_xlabel('time [days]')
        ax.set_ylabel('balance [BRK]')
plt.setp(ax.get_xticklabels(), rotation='vertical', fontsize=10)
plt.setp(ax.get_yticklabels(), fontsize=10)
plt.tight_layout()
plt.savefig(filename, format='pdf')
def plot_cat_hist(cat_vol, cat_label):
fig = plt.figure(figsize=(20,20),dpi=100)
pos = np.arange(len(cat_vol.keys())) + 0.5
plt.bar(pos, cat_vol.values())
plt.xticks(pos+0.5, cat_label.values())
fig.autofmt_xdate()
plt.show()
def total_flow(trans):
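    """Return the total transferred volume (the sum of column 5); 0.0 for an empty transaction array."""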
if trans.shape[0] > 0:
return sum(map(float,trans[:,5]))
else:
return 0.0
def flow_stats(trans):
print "month\tvolume\tcount\tmembers"
for y in range(2011,2014):
print y
print
vrl = letsf.month_filters(y)
for k, vr in vrl.iteritems():
month_trans = trans[vr(trans[:,4]),:]
print "%i\t%f\t%d\t%d" % (k, total_flow(month_trans), month_trans.shape[0] , np.unique(month_trans[:,(1,2)]).shape[0])
def flow_cat_stats(trans, cats):
    print "month:" + ':'.join(map(lambda x: x[0],cats))
yflows = {}
for y in range(2011,2014):
print y
print
vrl = letsf.month_filters(y)
for k, vr in vrl.iteritems():
flows = {}
for cat in cats:
t = trans[trans[:,30] == cat[1],:]
flows[cat[1]] = total_flow(t[vr(t[:,4]),:])
print '%i: %s' % (k, ','.join(map(lambda b: str(flows[b[1]]) ,cats)))
yflows[(y,k)] = flows
return yflows
def flow_cat_count_stats(trans, cats):
    print "month:" + ':'.join(map(lambda x: x[0],cats))
yflows = {}
for y in range(2011,2014):
print y
print
vrl = letsf.month_filters(y)
for k, vr in vrl.iteritems():
flows = {}
for cat in cats:
t = trans[trans[:,30] == cat[1],:]
flows[cat[1]] = t[vr(t[:,4]),:].shape[0]
print '%i: %s' % (k, ','.join(map(lambda b: str(flows[b[1]]) ,cats)))
yflows[(y,k)] = flows
return yflows
def diff_bals(bals1, bals2):
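    """Return, for each account present in bals2, the difference between its final
    balance in bals2 and its final balance in bals1."""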
f1 = map(lambda x:(x,bals1[x][0][-1]), bals2)
f2 = map(lambda x:(x,bals2[x][0][-1]), bals2)
diff = map(lambda x: (x[0][0], x[1][1]-x[0][1]) ,zip(f1,f2))
return diff
| gpl-2.0 |
xuyongzhi/scan_volume | src/rotate3D/scripts/rotate_to_3D.py | 1 | 7338 | #!/usr/bin/env python
import rospy
import rosbag
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointField
from std_msgs.msg import Int64
from laser_geometry import LaserProjection
import numpy as np
import matplotlib.pyplot as plt
import math
import os
BASE_DIR = os.path.dirname( os.path.abspath(__file__) )
# each scan is along x axis
# raw z is negative
def rotate_2d ( angle ):
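    """Return the 2x2 counter-clockwise rotation matrix for `angle` (in radians)."""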
R = np.array( [ [ np.cos(angle), -np.sin( angle ) ],[ np.sin( angle ), np.cos( angle ) ] ] )
return R
class RotateTo3D:
'''
    self.status: 'waiting' --start--> 'scanning' --stop--> 'waiting'
'''
def __init__(self):
self.separate_models = False
self.auto_pub_ref_at_frame = 5
pi_angle = 41
speed = 1.0 * math.pi / pi_angle
fre = 49.5
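        # Angle advanced between two consecutive scans, assuming `speed` is the
        # rotation speed in rad/s and `fre` the scan rate in Hz.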
self.increment_theta = 1.0 * speed / fre
self.z0_offset = 4 * 0.01
#self.z0_offset = 10 * 0.01
#self.z0_offset = 0 * 0.01
self.status = 'waiting'
self.pcl_3d = None
self.all_3d_points_ls = []
self.scanN = 0
self.theta = math.pi * 0.2
self.pcl_n = 0
self.pcl_3d_pub = rospy.Publisher('pcl_3d',PointCloud2,queue_size=10)
self.fig_dif = plt.figure()
self.ax_dif = self.fig_dif.add_subplot(111)
self.received_n = 0
res_path = os.path.join( BASE_DIR,'3d_res' )
if not os.path.exists( res_path ):
os.makedirs( res_path )
self.res_bag_name = os.path.join( res_path, 'pcl_3d-zofs_%d-piangle_%d-fre_%d.bag'%(self.z0_offset*100, pi_angle, fre*10) )
self.pcl3d_bag = rosbag.Bag( self.res_bag_name,'w')
rospy.loginfo( 'res path:%s'%(self.res_bag_name) )
def start(self):
self.status = 'start'
self.scanN = 0
self.pcl_3d = None
self.all_3d_points_ls = []
rospy.loginfo('received sart command')
def stop(self):
self.status = 'stop'
rospy.loginfo('received stop command, theta: %0.2f'%(self.theta*180.0/math.pi))
def from_2D_to_3D( self, point_2d ):
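        # Each scan lies along the sensor's x axis: the in-plane coordinate y0 and the
        # fixed sensor offset z0_offset are rotated by theta = scanN * increment_theta
        # about that axis, while the along-scan coordinate x0 becomes the third
        # 3D coordinate.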
x0 = point_2d[0]
y0 = point_2d[1]
self.theta = theta = self.scanN * self.increment_theta
xy = np.matmul( rotate_2d( self.theta ), np.array( [[y0],[self.z0_offset]] ) )
point_3d = [ xy[0,0], xy[1,0], x0, point_2d[3], point_2d[4] ]
return point_3d
#def from_2D_to_3D( self, point_2d ):
# x0 = point_2d[1]
# y0 = point_2d[0]
# self.theta = theta = self.scanN * self.increment_theta
# x = x0 * math.cos(theta)
# y = -x0 * math.sin(theta)
# z = y0
# point_3d = [x, y, z, point_2d[3], point_2d[4]]
# return point_3d
def add_data( self, pcl_LaserScan, dif_start=None, dif_end=None ) :
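        # Convert every point of the current 2D scan (optionally only the indices in
        # [dif_start, dif_end]) to 3D and append it to the accumulated point list.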
gen_data = pc2.read_points(pcl_LaserScan, field_names=None, skip_nans=True)
curscan_points = []
#if self.pcl_3d != None:
# gen_trunk = pc2.read_points(self.pcl_3d, field_names=None,skip_nans=True)
# for p in gen_trunk:
# curscan_points.append(list(p))
for idx, p in enumerate(gen_data):
if dif_start==None or ( idx >= dif_start and idx <= dif_end ):
point_2d = list(p) #[ x,y,z,?,? ] z==0
point_3d = self.from_2D_to_3D( point_2d )
curscan_points.append(point_3d)
#if self.scanN % 100 == 0 and idx==0:
# rospy.loginfo( 'scanN= %d, point_2d:%s, point_3d:%s'%( self.scanN, point_2d, point_3d ) )
self.all_3d_points_ls += curscan_points
self.pcl_3d = pc2.create_cloud(pcl_LaserScan.header, pcl_LaserScan.fields, curscan_points)
def xyz_from_pcl(self,pcl):
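        # Gather fields 1:4 of every point in the cloud into an [N, 3] numpy array.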
gen = pc2.read_points(pcl, field_names=None, skip_nans=True)
points = []
for p in gen:
xyz = np.array(list(p)[1:4])
            if len(points) == 0:  # avoid the ambiguous ndarray-vs-list comparison
points = xyz
else:
points = np.vstack((points,xyz))
return points
def update_scan_increment(self):
'''
do this at the end
'''
self.increment = self.trunk_length / self.scanN
rospy.loginfo('increment = %f / %d = %f',self.trunk_length,self.scanN,self.increment)
def push(self,data_LaserScan):
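        # Project the incoming LaserScan to a 2D point cloud; while scanning, fold it
        # into the rotating 3D model, publish the result and record it to the bag.
        # Scanning stops automatically once theta exceeds ~181 degrees.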
# rospy.loginfo('project data_LaserScan to PointCloud OK')
pcl_LaserScan = LaserProjection().projectLaser(data_LaserScan)
points_xyz = self.xyz_from_pcl(pcl_LaserScan) # points_xyz: [N,3] [:,1]=0
# print "scan point N = ",points_xyz.shape[0]," / ", pcl_LaserScan.width, " rangesN = ",len(data_LaserScan.ranges)
if self.status == 'start' or self.status == 'scanning':
if self.status == 'start':
self.status = 'scanning'
self.add_data( pcl_LaserScan )
self.scanN += 1
self.pcl_3d_pub.publish(self.pcl_3d)
self.pcl3d_bag.write( 'pcl_3d', self.pcl_3d )
elif self.status == 'stop':
            self.status = 'waiting'
if self.separate_models:
self.pcl_n = self.pcl_n + 1
self.reset()
self.pcl3d_bag.close()
rospy.loginfo('stop recording, save this model: ' + self.res_bag_name )
if self.status == 'scanning' and self.theta > 181.0 * math.pi / 180:
self.stop()
return self.scanN, self.theta
def dif_range(self,points_xyz):
'''
Compare the difference between points_xyz and self.ref_points_xyz.
        Return the indices dif_start and dif_end.
'''
min_N = min(points_xyz.shape[0],self.ref_points_xyz.shape[0])
dif = points_xyz[0:min_N,self.height_axis] - self.ref_points_xyz[0:min_N,self.height_axis]
dif = np.fabs(dif)
threshold = self.dif_threshold
dif_N = sum([ d > threshold for d in dif ])
self.scan_difN_pub.publish(dif_N)
if dif_N > 5:
dif_start = len(dif)
dif_end = 0
for i,d in enumerate(dif):
if dif_start==len(dif) and d > threshold and i+3<len(dif) and dif[i+1] > threshold and dif[i+3] > threshold:
dif_start = i
self.scan_difStart_pub.publish(dif_start)
if dif_start < len(dif) and i > dif_start and ( d < threshold or (d > threshold and i==len(dif)-1 ) ):
dif_end = i
self.scan_difEnd_pub.publish(dif_end)
if dif_end - dif_start > 3:
break
else:
# rospy.loginfo('short dif_range: dif_start= %d dif_end= %d dif_len= %d',dif_start,dif_end,dif_end-dif_start)
dif_start = len(dif)
dif_end = 0
return True,dif_start,dif_end
else:
return False,0,0
def volume_from_bag(self,model_bag_file):
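        # Replay the recorded pcl_3d messages from the bag and pass each one to self.pcl_volume.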
model_bag = rosbag.Bag(model_bag_file)
msg_gen = model_bag.read_messages(topics='pcl_3d')
for topic,msg,t in msg_gen:
self. pcl_volume(msg)
if __name__ == '__main__':
print 'in main'
#TVD = RotateTo3D()
#TVD.volume_from_bag('model_result_new/empty.bag')
| mit |
pronojitsaha/tpot | setup.py | 1 | 1780 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
def calculate_version():
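    # Parse __version__ out of tpot/_version.py without importing the package.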
initpy = open('tpot/_version.py').read().split('\n')
version = list(filter(lambda x: '__version__' in x, initpy))[0].split('\'')[1]
return version
package_version = calculate_version()
setup(
name='TPOT',
version=package_version,
author='Randal S. Olson',
author_email='[email protected]',
packages=find_packages(),
url='https://github.com/rhiever/tpot',
license='GNU/GPLv3',
entry_points={'console_scripts': ['tpot=tpot:main', ]},
description=('Tree-based Pipeline Optimization Tool'),
long_description='''
A Python tool that automatically creates and optimizes machine learning pipelines using genetic programming.
Contact
=============
If you have any questions or comments about TPOT, please feel free to contact me via:
E-mail: [email protected]
or Twitter: https://twitter.com/randal_olson
This project is hosted at https://github.com/rhiever/tpot
''',
zip_safe=True,
install_requires=['numpy', 'scipy', 'pandas', 'scikit-learn', 'deap', 'xgboost', 'update_checker'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
keywords=['pipeline optimization', 'hyperparameter optimization', 'data science', 'machine learning', 'genetic programming', 'evolutionary computation'],
)
| gpl-3.0 |
natsutan/cocytus | tools/cqt_diff/cqt_diff_yolo_neon.py | 1 | 2682 | import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import sys
keras_dir = '../../example/tiny-yolo/keras/output/'
cqt_dir = '../../example/tiny-yolo/c_neon/output/'
qp_file = '../../example/tiny-yolo/c_neon/weight/'
fix16mode = False
neon = True
neon_padding_list = (0, 3, 2, 1)
def layer_dump(i, q, fnum = 3):
"""
    Compare the Keras output of the layer given by the index with the Cocytus
    output and save the comparison as images. Filters from the first one up to
    fnum are compared.
    The images are written to the output directory.
    :param i:int layer index
    :param q:int Q (fixed-point) position of the output data
    :param fnum:int number of filters to render as images
    :return:
"""
for f in range(fnum):
plt.figure()
graph_name = 'l%02d_%d' % (i, f)
kname = os.path.join(keras_dir+'l%02d_%d.npy' % (i, f))
cname = os.path.join(cqt_dir+'l%02d.npy' % i)
k_data = np.load(kname)
k_shape = k_data.shape
k_data = k_data.flatten()
c_data = np.load(cname)
xsize = c_data.shape[2]
padding = xsize - k_shape[1] - 4
if neon:
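            # The NEON output buffer carries border padding; crop it (rows 2:-4,
            # columns 4:-padding) so its shape matches the Keras output.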
if padding != 0:
c_data_f = c_data[f,2:-4,4:-padding]
else:
c_data_f = c_data[f,2:-4,4:]
else:
c_data_f = c_data[f,:,:]
c_data_ff = c_data_f.flatten()
if fix16mode:
c_data_ff = c_data_ff.astype(np.float32) / (2 ** q)
x = np.arange(len(k_data))
plt.plot(x, k_data, color='b', label='Keras')
plt.plot(x, c_data_ff, color='r', label='Cocytus')
plt.title(graph_name)
plt.legend()
img_fname = os.path.join('output', graph_name+'.png')
print('save %s' % img_fname)
plt.savefig(img_fname)
plt.figure()
plt.plot(x, k_data - c_data_ff, color='g', label='diff')
plt.title(graph_name+'diff')
plt.legend()
img_fname = os.path.join('output', graph_name + '_diff.png')
plt.savefig(img_fname)
def read_qpfile(odir):
    """Read the qp file and return the Q positions of the inputs, outputs and weights as lists."""
iqs = []
wqs = []
oqs = []
fname = os.path.join(odir, 'qp.txt')
for i, l in enumerate(open(fname).readlines()):
if i < 1:
continue
words = l.split(',')
iqs.append(int(words[0]))
oqs.append(int(words[1]))
wqs.append(int(words[2]))
return iqs, oqs, wqs
iqs, oqs, wqs = read_qpfile(qp_file)
#for i in range(31):
# layer_dump(i, oqs[i])
# here
layer_dump(31, oqs[0])
print('finish')
| mit |