repo_name | path | copies | size | content | license
---|---|---|---|---|---
geodynamics/burnman | contrib/eos_fitting/eos_fitting.py | 2 | 14905 |
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.edgecolor'] = 'black'
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../../burnman'):
sys.path.insert(1, os.path.abspath('../..'))
import burnman
from fitting_functions import read_fitting_file
if __name__ == "__main__":
"""
First, please create a data file. This file should be in one of the following formats:
Type, P, T, property
Type, P, T, property, Perr, Terr, property_err
Type, P, T, property, cov_PP, cov_TT, cov_pp, cov_PT, cov_Pp, cov_Tp
where
err means standard error, and
cov is the covariance matrix of the data observations
PLEASE REMEMBER THAT cov_PP = Perr^2.
Type is a string describing the property value *as used in burnman*.
The property strings could be any of the following:
helmholtz
gibbs
H
S
V
molar_heat_capacity_p
molar_heat_capacity_v
p_wave_velocity
s_wave_velocity
K_T
K_S
Make sure that *all* parameters are in SI units.
"""
# Input file
filename = 'test.dat'
# Mineral to optimise (along with tweaks to initial properties if necessary)
mineral = burnman.minerals.SLB_2011.periclase()
mineral.set_state(1.e5, 300.)
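# (The next line presumably re-anchors the reference Helmholtz energy F_0 so that
# the enthalpy at the state set above (1.e5 Pa, 300 K) is approximately zero,
# which is convenient when the input enthalpies are referenced to ambient conditions.)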
mineral.params['F_0'] = mineral.params['F_0'] - mineral.H
# Fit parameters
fit_params = ['V_0', 'K_0', 'Kprime_0', 'grueneisen_0', 'q_0', 'Debye_0', 'F_0']
# Pressure and temperature sections to plot through the models
pressures = np.linspace(1.e5, 100.e9, 101)
temperature_sections = [300., 2000.]
temperatures = np.linspace(300., 2000., 101)
pressure_sections = [1.e5]
# Properties to plot which have data in the input file
properties_for_data_comparison_plots = [('V', 1.e6, 'Volume (cm^3/mol)'),
('H', 1.e-3, 'Enthalpy (kJ/mol)')]
# Properties to plot along with confidence interval
properties_for_confidence_plots = [('p_wave_velocity', 1.e-3, 'P wave velocity (km/s)'),
('K_T', 1.e-9, 'Bulk modulus (GPa)'),
('alpha', 1., 'Thermal expansion (/K)'),
(['alpha', 'K_T'], 1.e-6, 'Thermal pressure (MPa/K)')]
confidence_interval = 0.95
remove_outliers = True
good_data_confidence_interval = 0.9
param_tolerance = 1.e-5
# That's it for user inputs. Now just sit back and watch the plots appear...
flags, data, data_covariances = read_fitting_file(filename)
list_flags = list(set(flags))
print('Starting to fit user-defined data. Please be patient.')
fitted_eos = burnman.eos_fitting.fit_PTp_data(mineral = mineral,
flags = flags,
fit_params = fit_params,
data = data,
data_covariances = data_covariances,
param_tolerance = param_tolerance,
verbose = False)
# Print the optimized parameters
print('Optimized equation of state:')
burnman.tools.pretty_print_values(fitted_eos.popt, fitted_eos.pcov, fitted_eos.fit_params)
print('\nParameters:')
print(fitted_eos.popt)
print('\nFull covariance matrix:')
print(fitted_eos.pcov)
print('\nGoodness of fit:')
print(fitted_eos.goodness_of_fit)
print('\n')
# Create a plot of the residuals
fig, ax = plt.subplots()
burnman.nonlinear_fitting.plot_residuals(ax=ax,
weighted_residuals=fitted_eos.weighted_residuals,
flags=fitted_eos.flags)
plt.show()
confidence_bound, indices, probabilities = burnman.nonlinear_fitting.extreme_values(fitted_eos.weighted_residuals, good_data_confidence_interval)
if indices != [] and remove_outliers:
print('Removing {0:d} outliers (at the {1:.1f}% confidence interval) and refitting. Please wait just a little longer.'.format(len(indices), good_data_confidence_interval*100.))
mask = [i for i in range(len(fitted_eos.weighted_residuals)) if i not in indices]
flags = [flag for i, flag in enumerate(flags) if i not in indices]
data = data[mask]
data_covariances = data_covariances[mask]
fitted_eos = burnman.eos_fitting.fit_PTp_data(mineral = mineral,
flags = flags,
fit_params = fit_params,
data = data,
data_covariances = data_covariances,
param_tolerance = param_tolerance,
verbose = False)
# Print the optimized parameters
print('Optimized equation of state:')
burnman.tools.pretty_print_values(fitted_eos.popt, fitted_eos.pcov, fitted_eos.fit_params)
print('\nParameters:')
print(fitted_eos.popt)
print('\nFull covariance matrix:')
print(fitted_eos.pcov)
print('\nGoodness of fit:')
print(fitted_eos.goodness_of_fit)
print('\n')
# Create a plot of the residuals
fig, ax = plt.subplots()
burnman.nonlinear_fitting.plot_residuals(ax=ax,
weighted_residuals=fitted_eos.weighted_residuals,
flags=fitted_eos.flags)
plt.show()
# Create a corner plot of the covariances
fig, ax_array = burnman.nonlinear_fitting.corner_plot(popt=fitted_eos.popt,
pcov=fitted_eos.pcov,
param_names=fitted_eos.fit_params)
plt.show()
# Create plots for the weighted residuals of each type of measurement
for i, (material_property, scaling, name) in enumerate(properties_for_data_comparison_plots):
fig, ax = plt.subplots()
burnman.nonlinear_fitting.weighted_residual_plot(ax=ax,
model=fitted_eos,
flag=material_property,
sd_limit=3,
cmap=plt.cm.RdYlBu,
plot_axes=[0, 1],
scale_axes=[1.e-9, 1.])
ax.set_title('Weighted residual plot for {0:s}'.format(name))
ax.set_xlabel('Pressure (GPa)')
ax.set_ylabel('Temperature (K)')
plt.show()
flag_mask = [i for i, flag in enumerate(flags) if flag==material_property]
if temperature_sections != []:
for T in temperature_sections:
PTVs = np.array([pressures, [T]*len(pressures), mineral.evaluate(['V'], pressures, [T]*len(pressures))[0]]).T
# Plot confidence bands on the volumes
cp_bands = burnman.nonlinear_fitting.confidence_prediction_bands(model=fitted_eos,
x_array=PTVs,
confidence_interval=confidence_interval,
f=burnman.tools.attribute_function(mineral, material_property),
flag='V')
plt.plot(PTVs[:,0] / 1.e9, (cp_bands[0] + cp_bands[1])/2.*scaling, label='Optimised fit at {0:.0f} K'.format(T))
plt.plot(PTVs[:,0] / 1.e9, (cp_bands[0])*scaling, linestyle='--', color='r', label='{0:.1f}% confidence bands'.format(confidence_interval*100))
plt.plot(PTVs[:,0] / 1.e9, (cp_bands[1])*scaling, linestyle='--', color='r')
plt.errorbar(fitted_eos.data[:,0][flag_mask] / 1.e9, fitted_eos.data[:,2][flag_mask]*scaling,
xerr=np.sqrt(fitted_eos.data_covariances.T[0][0][flag_mask]) / 1.e9,
yerr=np.sqrt(fitted_eos.data_covariances.T[2][2][flag_mask])*scaling,
linestyle='None', marker='o', label='Data')
plt.plot(fitted_eos.data_mle[:,0][flag_mask] / 1.e9, fitted_eos.data_mle[:,2][flag_mask]*scaling, marker='o', markersize=2, color='k', linestyle='None', label='Maximum likelihood estimates')
plt.ylabel('{0:s}'.format(name))
plt.xlabel('Pressure (GPa)')
plt.legend(loc='upper right')
plt.title('Data comparison for fitted equation of state as a function of pressure')
plt.show()
if pressure_sections != []:
for P in pressure_sections:
PTVs = np.array([[P]*len(temperatures), temperatures, mineral.evaluate(['V'], [P]*len(temperatures), temperatures)[0]]).T
# Plot confidence bands on the volumes
cp_bands = burnman.nonlinear_fitting.confidence_prediction_bands(model=fitted_eos,
x_array=PTVs,
confidence_interval=confidence_interval,
f=burnman.tools.attribute_function(mineral, material_property),
flag='V')
plt.plot(PTVs[:,1], (cp_bands[0] + cp_bands[1])/2.*scaling, label='Optimised fit at {0:.0f} GPa'.format(P/1.e9))
plt.plot(PTVs[:,1], (cp_bands[0])*scaling, linestyle='--', color='r', label='{0:.1f}% confidence bands'.format(confidence_interval*100))
plt.plot(PTVs[:,1], (cp_bands[1])*scaling, linestyle='--', color='r')
plt.errorbar(fitted_eos.data[:,1][flag_mask], fitted_eos.data[:,2][flag_mask]*scaling,
xerr=np.sqrt(fitted_eos.data_covariances.T[1][1][flag_mask]),
yerr=np.sqrt(fitted_eos.data_covariances.T[2][2][flag_mask])*scaling,
linestyle='None', marker='o', label='Data')
plt.plot(fitted_eos.data_mle[:,1][flag_mask], fitted_eos.data_mle[:,2][flag_mask]*scaling, marker='o', markersize=2, color='k', linestyle='None', label='Maximum likelihood estimates')
plt.ylabel('{0:s}'.format(name))
plt.xlabel('Temperature (K)')
plt.legend(loc='upper right')
plt.title('Data comparison for fitted equation of state as a function of temperature')
plt.show()
# We can also look at the uncertainty in other properties
# For example, let's look at the uncertainty in P wave velocities, bulk modulus, thermal expansion and thermal pressure
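# Helper: return the factor pair (i, n/i) of n that is closest to a square,
# used below to arrange the n property plots on an ni-by-nj subplot grid
# (for a prime n this degenerates to a 1 x n layout).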
def closest_factors(n):
d = int(np.floor(np.sqrt(n)))
for i in reversed(range(1, d+1)):
if (n % i) == 0:
return i, int(n/i)
nj, ni = closest_factors(len(properties_for_confidence_plots))
if temperature_sections != []:
fig = plt.figure()
for T in temperature_sections:
PTVs = np.array([pressures, [T]*len(pressures), mineral.evaluate(['V'], pressures, [T]*len(pressures))[0]]).T
for i, (material_property, scaling, name) in enumerate(properties_for_confidence_plots):
ax = fig.add_subplot(ni, nj, i+1)
# Plot the confidence bands for the various material properties
cp_bands = burnman.nonlinear_fitting.confidence_prediction_bands(model=fitted_eos,
x_array=PTVs,
confidence_interval=confidence_interval,
f=burnman.tools.attribute_function(mineral, material_property),
flag='V')
ax.plot(PTVs[:,0]/1.e9, (cp_bands[0] + cp_bands[1])/2*scaling, label='Best fit at {0:.0f} K'.format(T))
ax.plot(PTVs[:,0]/1.e9, (cp_bands[0])*scaling, linestyle='--', color='r', label='{0:.1f}% confidence bands'.format(confidence_interval*100))
ax.plot(PTVs[:,0]/1.e9, (cp_bands[1])*scaling, linestyle='--', color='r')
plt.ylabel(name)
plt.xlabel('Pressure (GPa)')
plt.legend(loc='upper right')
plt.show()
if pressure_sections != []:
fig = plt.figure()
for P in pressure_sections:
PTVs = np.array([[P]*len(temperatures), temperatures, mineral.evaluate(['V'], [P]*len(temperatures), temperatures)[0]]).T
for i, (material_property, scaling, name) in enumerate(properties_for_confidence_plots):
ax = fig.add_subplot(ni,nj, i+1)
# Plot the confidence bands for the various material properties
cp_bands = burnman.nonlinear_fitting.confidence_prediction_bands(model=fitted_eos,
x_array=PTVs,
confidence_interval=confidence_interval,
f=burnman.tools.attribute_function(mineral, material_property),
flag='V')
ax.plot(PTVs[:,1], (cp_bands[0] + cp_bands[1])/2*scaling, label='Best fit at {0:.0f} GPa'.format(P/1.e9))
ax.plot(PTVs[:,1], (cp_bands[0])*scaling, linestyle='--', color='r', label='{0:.1f}% confidence bands'.format(confidence_interval*100))
ax.plot(PTVs[:,1], (cp_bands[1])*scaling, linestyle='--', color='r')
plt.ylabel(name)
plt.xlabel('Temperature (K)')
plt.legend(loc='upper right')
plt.show()
| gpl-2.0 |
perryjohnson/biplaneblade | biplane_blade_lib/prep_stn23_mesh.py | 1 | 31016 |
"""Write initial TrueGrid files for one biplane blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run biplane_blade_lib/prep_stnXX_mesh.py
or
|> import biplane_blade_lib.prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 30, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
reload(bl)
import lib.poly_utils as pu
reload(pu)
from shapely.geometry import Polygon
from shapely.affinity import translate
# SET THESE PARAMETERS -----------------
station_num = 23
# --------------------------------------
plt.close('all')
# load the biplane blade
b1 = bl.BiplaneBlade(
'biplane blade, flapwise symmetric, no stagger, rj/R=0.452, g/c=1.25',
'biplane_blade')
# pre-process the station dimensions
station = b1.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure and airfoil for this station
st = station.structure
af = station.airfoil
x3_off = af.lower_chord * af.gap_to_chord_ratio * af.gap_fraction
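# (x3_off appears to be this station's share of the biplane gap: the vertical (x3)
# distance by which the lower airfoil is later translated into the local beam
# coordinate system, cf. the plotting section near the end of this script.)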
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
usc = st.lower_spar_cap.layer['upper']
is2 = st.lower_internal_surface_2.layer['resin']
points_usc = [
(-0.75, usc.left[0][1]), # lower_SparCap_upper.txt
is2.polygon.interiors[0].coords[0], # lower_InternalSurface2_resin.txt
is2.polygon.interiors[0].coords[39-28], # lower_InternalSurface2_resin.txt
( 0.75, usc.right[1][1]), # lower_SparCap_upper.txt
( 0.75, 0.0),
(-0.75, 0.0)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
lsc = st.lower_spar_cap.layer['lower']
points_lsc = [
(-0.75,-6.5),
( 0.75,-6.5),
( 0.75000000, lsc.right[0][1]), # lower_SparCap_lower.txt
is2.polygon.interiors[0].coords[38-28], # lower_InternalSurface2_resin.txt
is2.polygon.interiors[0].coords[1], # lower_InternalSurface2_resin.txt
(-0.75000000, lsc.left[1][1]) # lower_SparCap_lower.txt
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 1 ------------------------------------------------
label = 'TE reinforcement, upper 1'
# create the bounding polygon
ter = st.lower_TE_reinforcement.layer['foam']
is4 = st.lower_internal_surface_4.layer['resin']
points_teu1 = [
(ter.top[0][0], -1.5), # TE_Reinforcement_foam.txt
tuple(ter.top[0]),
is4.polygon.interiors[0].coords[525-224], # InternalSurface4_resin.txt
(3.3, -2.7), # TE_Reinforcement_foam.txt
is4.polygon.interiors[0].coords[518-224], # InternalSurface4_resin.txt
(is4.polygon.interiors[0].coords[518-224][0], -1.5) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_teu1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 1 ------------------------------------------------
label = 'TE reinforcement, lower 1'
# create the bounding polygon
points_tel1 = [
(ter.bottom[0][0], -5.0), # TE_Reinforcement_foam.txt
tuple(ter.bottom[1]),
is4.polygon.interiors[0].coords[357-224], # InternalSurface4_resin.txt
(3.3, -2.7), # TE_Reinforcement_foam.txt
is4.polygon.interiors[0].coords[518-224], # InternalSurface4_resin.txt
(is4.polygon.interiors[0].coords[518-224][0], -5.0) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_tel1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 2 ------------------------------------------------
label = 'TE reinforcement, upper 2'
# create the bounding polygon
points_teu2 = [
points_teu1[-1],
points_teu1[-2],
ter.polygon.exterior.coords[94-3], # lower_TE_reinforcement_foam.txt
(ter.polygon.exterior.coords[94-3][0], -1.5) # lower_TE_reinforcement_foam.txt
]
bounding_polygon = Polygon(points_teu2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 2 ------------------------------------------------
label = 'TE reinforcement, lower 2'
# create the bounding polygon
points_tel2 = [
(points_teu2[0][0], -5.0),
points_teu2[1],
points_teu2[2],
(points_teu2[2][0], -5.0)
]
bounding_polygon = Polygon(points_tel2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 3 ------------------------------------------------
label = 'TE reinforcement, upper 3'
# create the bounding polygon
points_teu3 = [
points_teu2[-1],
points_teu2[-2],
ter.polygon.exterior.coords[0], # TE_Reinforcement_foam.txt
(ter.polygon.exterior.coords[0][0], -1.5) # TE_Reinforcement_foam.txt
]
bounding_polygon = Polygon(points_teu3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 3 ------------------------------------------------
label = 'TE reinforcement, lower 3'
# create the bounding polygon
points_tel3 = [
(points_teu3[0][0], -5.0),
points_teu3[1],
points_teu3[2],
(points_teu3[2][0], -5.0)
]
bounding_polygon = Polygon(points_tel3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 4 ------------------------------------------------
label = 'TE reinforcement, upper 4'
# create the bounding polygon
es = st.lower_external_surface.layer['gelcoat']
teru = st.lower_TE_reinforcement.layer['uniax']
points_teu4 = [
points_teu3[-1],
points_teu3[-2],
(teru.polygon.exterior.coords[-2][0], -2.87), # TE_Reinforcement_uniax.txt
teru.polygon.exterior.coords[-2], # TE_Reinforcement_uniax.txt
es.polygon.exterior.coords[-2],
(teru.polygon.exterior.coords[-2][0], -1.5) # TE_Reinforcement_uniax.txt
]
bounding_polygon = Polygon(points_teu4)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 4 ------------------------------------------------
label = 'TE reinforcement, lower 4'
# create the bounding polygon
points_tel4 = [
(points_teu4[0][0], -5.0),
points_teu4[1],
points_teu4[2],
teru.polygon.exterior.coords[-1], # TE_Reinforcement_uniax.txt
es.polygon.exterior.coords[-1],
(points_teu4[2][0], -5.0)
]
bounding_polygon = Polygon(points_tel4)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# LE panel -----------------------------------------------------------------
label = 'LE panel'
# create the bounding polygon
lep = st.lower_LE_panel.layer['foam']
is1 = st.lower_internal_surface_1.layer['resin']
points_le = [
(-3.00,-6.5),
(-0.836,-6.5),
tuple(lep.bottom[0]), # lower_LE_Panel_foam.txt
is1.polygon.interiors[0].coords[0], # lower_InternalSurface1_resin.txt
(-1.5, -x3_off),
is1.polygon.interiors[0].coords[1], # lower_InternalSurface1_resin.txt
tuple(lep.top[1]), # lower_LE_Panel_foam.txt
(-0.836, 0.0),
(-3.00, 0.0)
]
bounding_polygon = Polygon(points_le)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_1, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_1, 'triax', label,
bounding_polygon, airfoil='lower')
# upper aft panel 1 -------------------------------------------------------
label = 'upper aft panel 1'
# create the bounding polygon
ap1u = st.lower_aft_panel_1.layer['upper']
is3 = st.lower_internal_surface_3.layer['resin']
points_ap1u = [
(0.836, 0.0),
(ap1u.right[1][0], 0.0), # lower_AftPanel1_upper.txt
tuple(ap1u.right[1]), # lower_AftPanel1_upper.txt
is3.polygon.interiors[0].coords[61-35], # lower_InternalSurface3_resin.txt
(2.0, -2.7),
is3.polygon.interiors[0].coords[-2], # lower_InternalSurface3_resin.txt
tuple(ap1u.left[0]) # lower_AftPanel1_upper.txt
]
bounding_polygon = Polygon(points_ap1u)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'triax', label,
bounding_polygon, airfoil='lower')
# lower aft panel 1 -------------------------------------------------------
label = 'lower aft panel 1'
# create the bounding polygon
ap1l = st.lower_aft_panel_1.layer['lower']
points_ap1l = [
(0.836, -6.5),
(ap1l.right[0][0], -6.5), # lower_AftPanel1_lower.txt
tuple(ap1l.right[0]), # lower_AftPanel1_lower.txt
is3.polygon.interiors[0].coords[60-35], # lower_InternalSurface3_resin.txt
(2.0, -2.7),
is3.polygon.interiors[0].coords[-1], # lower_InternalSurface3_resin.txt
tuple(ap1l.left[1]) # lower_AftPanel1_lower.txt
]
bounding_polygon = Polygon(points_ap1l)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'triax', label,
bounding_polygon, airfoil='lower')
# upper aft panel 2 -------------------------------------------------------
label = 'upper aft panel 2'
# create the bounding polygon
ap2u = st.lower_aft_panel_2.layer['upper']
sw3br = st.lower_shear_web_3.layer['biax, right']
points_ap2u = [
(sw3br.right[0][0], 0.0),
(ap2u.right[1][0], 0.0), # AftPanel2_upper.txt
tuple(ap2u.right[1]), # AftPanel2_upper.txt
# is4.polygon.interiors[0].coords[459-224], # InternalSurface4_resin.txt
(ap2u.right[1][0], -2.7),
(2.5, -2.7),
is4.polygon.interiors[0].coords[-2], # InternalSurface4_resin.txt
tuple(ap2u.left[0]) # AftPanel2_upper.txt
]
bounding_polygon = Polygon(points_ap2u)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon)
# lower aft panel 2 -------------------------------------------------------
label = 'lower aft panel 2'
# create the bounding polygon
ap2l = st.lower_aft_panel_2.layer['lower']
points_ap2l = [
(sw3br.right[0][0], -5.4),
(ap2l.right[0][0], -5.4), # AftPanel2_lower.txt
tuple(ap2l.right[0]), # AftPanel2_lower.txt
# is4.polygon.interiors[0].coords[296-224], # InternalSurface4_resin.txt
(ap2l.right[0][0], -2.7),
(2.5, -2.7),
is4.polygon.interiors[0].coords[-1], # InternalSurface4_resin.txt
tuple(ap2l.left[1]) # AftPanel2_lower.txt
]
bounding_polygon = Polygon(points_ap2l)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon)
# above shear web 1 ----------------------------------------------------------
label = 'above shear web 1'
# create the bounding polygon
points_asw1 = [
(-0.75, 0.0),
(-0.75, -2.7),
(-0.836, -2.7),
(-0.836, 0.0)
]
bounding_polygon = Polygon(points_asw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# below shear web 1 ----------------------------------------------------------
label = 'below shear web 1'
# create the bounding polygon
points_bsw1 = [
(-0.75, -6.5),
(-0.75, -2.7),
(-0.836, -2.7),
(-0.836, -6.5)
]
bounding_polygon = Polygon(points_bsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# above shear web 2 ----------------------------------------------------------
label = 'above shear web 2'
# create the bounding polygon
points_asw2 = [
(0.75, 0.0),
(0.75, -2.7),
(0.836, -2.7),
(0.836, 0.0)
]
bounding_polygon = Polygon(points_asw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# below shear web 2 ----------------------------------------------------------
label = 'below shear web 2'
# create the bounding polygon
points_bsw2 = [
(0.75, -6.5),
(0.75, -2.7),
(0.836, -2.7),
(0.836, -6.5)
]
bounding_polygon = Polygon(points_bsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# above shear web 3 ----------------------------------------------------------
label = 'above shear web 3'
sw3bl = st.lower_shear_web_3.layer['biax, left']
# create the bounding polygon
points_asw3 = [
(sw3bl.left[0][0], 0.0),
(sw3bl.left[0][0], -2.7),
(sw3br.right[0][0], -2.7),
(sw3br.right[0][0], 0.0)
]
bounding_polygon = Polygon(points_asw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# below shear web 3 ----------------------------------------------------------
label = 'below shear web 3'
# create the bounding polygon
points_bsw3 = [
(sw3bl.left[0][0], -6.5),
(sw3bl.left[0][0], -2.7),
(sw3br.right[0][0], -2.7),
(sw3br.right[0][0], -6.5)
]
bounding_polygon = Polygon(points_bsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# left of shear web 1 -------------------------------------------------------
label = 'left of shear web 1'
# create the bounding polygon
points_lsw1 = points_le[2:-2]
bounding_polygon = Polygon(points_lsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_1, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_1, 'triax', label,
bounding_polygon, airfoil='lower')
# right of shear web 1 -------------------------------------------------------
label = 'right of shear web 1'
# create the bounding polygon
points_rsw1 = [
points_usc[0],
points_usc[1],
(0.0, -x3_off),
points_lsc[-2],
points_lsc[-1]
]
bounding_polygon = Polygon(points_rsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# left of shear web 2 -------------------------------------------------------
label = 'left of shear web 2'
# create the bounding polygon
points_lsw2 = [
points_usc[3],
points_usc[2],
(0.0, -x3_off),
points_lsc[3],
points_lsc[2]
]
bounding_polygon = Polygon(points_lsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# right of shear web 2 -------------------------------------------------------
label = 'right of shear web 2'
# create the bounding polygon
points_rsw2 = [
points_ap1u[-1],
points_ap1u[-2],
(1.5, -x3_off),
points_ap1l[-2],
points_ap1l[-1]
]
bounding_polygon = Polygon(points_rsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'triax', label,
bounding_polygon, airfoil='lower')
# left of shear web 3 -------------------------------------------------------
label = 'left of shear web 3'
# create the bounding polygon
points_lsw3 = [
points_ap1u[2],
points_ap1u[3],
(1.5, -2.7),
points_ap1l[3],
points_ap1l[2]
]
bounding_polygon = Polygon(points_lsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'triax', label,
bounding_polygon, airfoil='lower')
# right of shear web 3 -------------------------------------------------------
label = 'right of shear web 3'
# create the bounding polygon
points_rsw3 = [
points_ap2u[-1],
points_ap2u[-2],
(3.0, -2.7),
points_ap2l[-2],
points_ap2l[-1]
]
bounding_polygon = Polygon(points_rsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
# -----------------------------------------------------------------------------
list_of_mesh_layers = []
# translate all the alt layers in each part
for (name, layer) in st.lower_TE_reinforcement.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_external_surface.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_internal_surface_1.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_internal_surface_2.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_internal_surface_3.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_internal_surface_4.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
# translate all the remaining regular layers
st.lower_spar_cap.layer['upper'].move(x3_off)
st.lower_spar_cap.layer['lower'].move(x3_off)
st.lower_aft_panel_1.layer['upper'].move(x3_off)
st.lower_aft_panel_1.layer['lower'].move(x3_off)
st.lower_aft_panel_2.layer['upper'].move(x3_off)
st.lower_aft_panel_2.layer['lower'].move(x3_off)
st.lower_LE_panel.layer['foam'].move(x3_off)
st.lower_shear_web_1.layer['biax, left'].move(x3_off)
st.lower_shear_web_1.layer['foam'].move(x3_off)
st.lower_shear_web_1.layer['biax, right'].move(x3_off)
st.lower_shear_web_2.layer['biax, left'].move(x3_off)
st.lower_shear_web_2.layer['foam'].move(x3_off)
st.lower_shear_web_2.layer['biax, right'].move(x3_off)
st.lower_shear_web_3.layer['biax, left'].move(x3_off)
st.lower_shear_web_3.layer['foam'].move(x3_off)
st.lower_shear_web_3.layer['biax, right'].move(x3_off)
list_of_mesh_layers.append(st.lower_spar_cap.layer['upper'])
list_of_mesh_layers.append(st.lower_spar_cap.layer['lower'])
list_of_mesh_layers.append(st.lower_aft_panel_1.layer['upper'])
list_of_mesh_layers.append(st.lower_aft_panel_1.layer['lower'])
list_of_mesh_layers.append(st.lower_aft_panel_2.layer['upper'])
list_of_mesh_layers.append(st.lower_aft_panel_2.layer['lower'])
list_of_mesh_layers.append(st.lower_LE_panel.layer['foam'])
list_of_mesh_layers.append(st.lower_shear_web_1.layer['biax, left'])
list_of_mesh_layers.append(st.lower_shear_web_1.layer['foam'])
list_of_mesh_layers.append(st.lower_shear_web_1.layer['biax, right'])
list_of_mesh_layers.append(st.lower_shear_web_2.layer['biax, left'])
list_of_mesh_layers.append(st.lower_shear_web_2.layer['foam'])
list_of_mesh_layers.append(st.lower_shear_web_2.layer['biax, right'])
list_of_mesh_layers.append(st.lower_shear_web_3.layer['biax, left'])
list_of_mesh_layers.append(st.lower_shear_web_3.layer['foam'])
list_of_mesh_layers.append(st.lower_shear_web_3.layer['biax, right'])
# plot the lower airfoil in the local beam coordinate system
# (translate it up by the appropriate gap distance: x3_off)
fig,ax = plt.subplots()
fmt1 = "Station #{0}, {1}, {2}% span\n"
fmt2 = "lower airfoil in local beam coordinate system (x3-offset = {3:+.4f})"
fmt = fmt1 + fmt2
ax.set_title(fmt.format(station.station_num, station.airfoil.name,
station.coords.x1, x3_off))
lp2 = translate(af.lower_polygon, yoff=x3_off)
(minx, miny, maxx, maxy) = lp2.bounds
ax.set_xlim([minx*1.2,maxx*1.2])
ax.set_ylim([miny*1.2,maxy*1.2])
plt.grid('on')
ax.set_xlabel('x2 [meters]')
ax.set_ylabel('x3 [meters]')
ax.set_aspect('equal')
for layer in list_of_mesh_layers:
station.plot_polygon(layer.polygon, ax, layer.face_color, layer.edge_color,
alpha=0.8)
# show the plots
plt.show()
# write the TrueGrid input file for mesh generation ---------------------
st.write_truegrid_inputfile(
interrupt_flag=True,
additional_layers=[
st.lower_spar_cap.layer['upper'],
st.lower_spar_cap.layer['lower'],
st.lower_aft_panel_1.layer['upper'],
st.lower_aft_panel_1.layer['lower'],
st.lower_aft_panel_2.layer['upper'],
st.lower_aft_panel_2.layer['lower'],
st.lower_LE_panel.layer['foam'],
st.lower_shear_web_1.layer['biax, left'],
st.lower_shear_web_1.layer['foam'],
st.lower_shear_web_1.layer['biax, right'],
st.lower_shear_web_2.layer['biax, left'],
st.lower_shear_web_2.layer['foam'],
st.lower_shear_web_2.layer['biax, right'],
st.lower_shear_web_3.layer['biax, left'],
st.lower_shear_web_3.layer['foam'],
st.lower_shear_web_3.layer['biax, right']
],
alt_TE_reinforcement=True,
soft_warning=True)
| gpl-3.0 |
theoryno3/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 |
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
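# Artificially sparsify the copy: zeroing every entry below 2.5 keeps only the
# largest values of the (roughly standard-normal) features, so only a small
# percentage of entries remain non-zero.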
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/core/frame.py | 7 | 214523 |
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from numpy import nan as NA
import numpy as np
import numpy.ma as ma
from pandas.types.cast import (_maybe_upcast,
_infer_dtype_from_scalar,
_possibly_cast_to_datetime,
_possibly_infer_to_datetimelike,
_possibly_convert_platform,
_possibly_downcast_to_dtype,
_invalidate_string_dtypes,
_coerce_to_dtypes,
_maybe_upcast_putmask,
_find_common_type)
from pandas.types.common import (is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_datetimetz,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
_ensure_float,
_ensure_float64,
_ensure_int64,
_ensure_platform_int,
is_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.types.missing import isnull, notnull
from pandas.core.common import (PandasError, _try_sort,
_default_index,
_values_from_object,
_maybe_box_datetimelike,
_dict_compat)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.computation.expressions as expressions
import pandas.core.algorithms as algos
from pandas.computation.eval import eval as _eval
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util.decorators import deprecate_kwarg, Appender, Substitution
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
import pandas.core.base as base
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.formats.format as fmt
from pandas.formats.printing import pprint_thing
import pandas.tools.plotting as gfx
import pandas.lib as lib
import pandas.algos as _algos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""")
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
on : label or list
Field names to join on. Must be found in both DataFrames. If on is
None and not merging on indexes, then it merges on the intersection of
the columns by default.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
.. versionadded:: 0.17.0
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge_ordered
merge_asof
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
>>> d = {'col1': ts1, 'col2': ts2}
>>> df = DataFrame(data=d, index=index)
>>> df2 = DataFrame(np.random.randn(10, 5))
>>> df3 = DataFrame(np.random.randn(10, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = dict((k, data[k]) for k in data_columns)
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None):
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (list, types.GeneratorType)):
if isinstance(data, types.GeneratorType):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = _default_index(len(data[0]))
else:
index = _default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
elif isinstance(data, collections.Iterator):
raise TypeError("data argument can't be an iterator")
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: %s' % e)
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
if isinstance(data, compat.string_types) and dtype is None:
dtype = np.object_
if dtype is None:
dtype, data = _infer_dtype_from_scalar(data)
values = np.empty((len(index), len(columns)), dtype=dtype)
values.fill(data)
mgr = self._init_ndarray(values, index, columns, dtype=dtype,
copy=False)
else:
raise PandasError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
columns = _ensure_index(columns)
# GH10856
# raise ValueError if only scalars in dict
if index is None:
extract_index(list(data.values()))
# prefilter if columns passed
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
if index is None:
index = extract_index(list(data.values()))
else:
index = _ensure_index(index)
arrays = []
data_names = []
for k in columns:
if k not in data:
# no obvious "empty" int column
if dtype is not None and issubclass(dtype.type,
np.integer):
continue
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
elif np.issubdtype(dtype, np.flexible):
v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
v.fill(NA)
else:
v = data[k]
data_names.append(k)
arrays.append(v)
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
keys = _try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = _default_index(N)
else:
index = _ensure_index(index)
if columns is None:
columns = _default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
return self._init_dict({0: values}, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if values.dtype != dtype:
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '%s' (Exception was: %s)"
% (dtype, orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = _possibly_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list with the row axis labels and column axis labels as the
only members. They are returned in that order.
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In case of a non-interactive session, no
boundaries apply.
ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = fmt.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not com.in_interactive_session():
return True
if (get_option('display.width') is not None or
com.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if max_rows is not None: # rows are limited; truncate before rendering
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = fmt.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'&lt;', 1)
val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular DataFrame.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if get_option('display.latex.repr'):
return self.to_latex()
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation of the DataFrame.
See Also
--------
pandas.formats.style.Styler
"""
from pandas.formats.style import Styler
return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
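Examples
--------
A minimal sketch (the column names and values are hypothetical):
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> for name, col in df.iteritems():
...     print(name, col.tolist())
a [1, 2]
b [3, 4]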
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
if compat.PY3: # pragma: no cover
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
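Examples
--------
A minimal sketch with hypothetical values:
>>> import pandas as pd
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['x', 'y'])
>>> s = pd.Series([1, 1], index=['x', 'y'])
>>> df.dot(s).tolist()
[3, 7]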
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
DataFrame
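Examples
--------
Illustrative only; the column names and values are hypothetical:
>>> import pandas as pd
>>> data = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame.from_dict(data)
>>> sorted(df.columns)
['col1', 'col2']
>>> df['col2'].tolist()
[3, 4]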
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
def to_dict(self, orient='dict'):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
.. versionadded:: 0.17.0
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
Returns
-------
result : dict like {column -> {index -> value}}
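Examples
--------
A minimal sketch (key order inside the returned dicts may vary):
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
>>> df.to_dict()
{'a': {'x': 1, 'y': 2}}
>>> df.to_dict(orient='list')
{'a': [1, 2]}
>>> df.to_dict(orient='records')
[{'a': 1}, {'a': 2}]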
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning)
if orient.lower().startswith('d'):
return dict((k, v.to_dict()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return dict((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return {'index': self.index.tolist(),
'columns': self.columns.tolist(),
'data': lib.map_infer(self.values.ravel(),
_maybe_box_datetimelike)
.reshape(self.values.shape).tolist()}
elif orient.lower().startswith('s'):
return dict((k, _maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [dict((k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, row))
for row in self.values]
elif orient.lower().startswith('i'):
return dict((k, v.to_dict()) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
"""Write a DataFrame to a Google BigQuery table.
THIS IS AN EXPERIMENTAL LIBRARY
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
'fail': If table exists, do nothing.
'replace': If table exists, drop it, recreate it, and insert data.
'append': If table exists, insert data. Create if does not exist.
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (e.g. a Jupyter/IPython notebook on a remote host)
.. versionadded:: 0.17.0
"""
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
chunksize=chunksize, verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point; useful for SQL result sets
Returns
-------
df : DataFrame
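Examples
--------
Illustrative only (hypothetical records):
>>> import pandas as pd
>>> records = [(1, 'a'), (2, 'b')]
>>> df = pd.DataFrame.from_records(records, columns=['num', 'letter'])
>>> df['num'].tolist()
[1, 2]
>>> df['letter'].tolist()
['a', 'b']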
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
result_index = MultiIndex.from_arrays(
[arrays[i] for i in to_remove], names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
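Examples
--------
Illustrative sketch; the record field names follow the index and column names:
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
>>> df.to_records().dtype.names
('index', 'a')
>>> df.to_records(index=False).dtype.names
('a',)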
"""
if index:
if is_datetime64_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = lmap(str, index_names) + lmap(str, self.columns)
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(str, self.columns)
dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)])
return np.rec.fromarrays(arrays, dtype=dtype, names=names)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
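Examples
--------
A minimal sketch with hypothetical items:
>>> import pandas as pd
>>> df = pd.DataFrame.from_items([('a', [1, 2]), ('b', [3, 4])])
>>> list(df.columns)
['a', 'b']
>>> df['b'].tolist()
[3, 4]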
"""
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = _ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(_ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = _ensure_index(keys)
arrays = values
return cls._from_arrays(arrays, columns, None)
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = _ensure_index(keys)
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=False,
infer_datetime_format=False):
"""
Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True)
or in the new, expanded format (if False)
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
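Examples
--------
A minimal sketch with hypothetical data:
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1.0, np.nan, 3.0]})
>>> sdf = df.to_sparse()
>>> type(sdf).__name__
'SparseDataFrame'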
"""
from pandas.core.sparse import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sortlevel(0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=False, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ','
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of strings is given, it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True)
or in the new, expanded format (if False)
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
.. versionadded:: 0.16.0
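Examples
--------
A minimal sketch; when ``path_or_buf`` is None the CSV text is returned:
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2]})
>>> df.to_csv(index=False)
'a\n1\n2\n'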
"""
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True):
"""
Write DataFrame to an Excel sheet
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of strings is given, it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame
startcol : int, default 0
Upper left cell column to dump data frame
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : boolean, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding: string, default None
encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
Notes
-----
If passing an existing ExcelWriter object, then the sheet will be added
to the existing workbook. This can be used to save different
DataFrames to one workbook:
>>> writer = ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
For compatibility with to_csv, to_excel serializes lists and dicts to
strings before writing.
"""
from pandas.io.excel import ExcelWriter
need_save = False
if encoding is None:
encoding = 'ascii'
if isinstance(excel_writer, compat.string_types):
excel_writer = ExcelWriter(excel_writer, engine=engine)
need_save = True
formatter = fmt.ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatted_cells = formatter.get_formatted_cells()
excel_writer.write_cells(formatted_cells, sheet_name,
startrow=startrow, startcol=startcol)
if need_save:
excel_writer.save()
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
"""
Write the DataFrame to a Stata binary dta file
Parameters
----------
fname : str or buffer
String path of file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
from pandas.io.stata import StataWriter
writer = StataWriter(fname, self, convert_dates=convert_dates,
encoding=encoding, byteorder=byteorder,
time_stamp=time_stamp, data_label=data_label,
write_index=write_index,
variable_labels=variable_labels)
writer.write_file()
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal)
# TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
@Appender(fmt.common_docstring + fmt.return_docstring, indents=1)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=True,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.'):
"""
Render a DataFrame to a tabular environment table. You can splice
this into a LaTeX document. Requires \\usepackage{booktabs}.
`to_latex`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns
longtable : boolean, default will be read from the pandas config module
default: False
Use a longtable environment instead of tabular. Requires adding
a \\usepackage{longtable} to your LaTeX preamble.
escape : boolean, default will be read from the pandas config module
default: True
When set to False, prevents escaping of LaTeX special
characters in column names.
encoding : str, default None
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
"""
# Get defaults from the pandas config
if longtable is None:
longtable = get_option("display.latex.longtable")
if escape is None:
escape = get_option("display.latex.escape")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding)
if buf is None:
return formatter.buf.getvalue()
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean/string, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
the `display.memory_usage` setting. A value of 'deep' is equivalent
of True, with deep introspection. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than
max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
"""
from pandas.formats.format import _put_lines
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index.summary())
if len(self.columns) == 0:
lines.append('Empty %s' % type(self).__name__)
_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max([len(pprint_thing(k)) for k in self.columns]) + 4
counts = None
tmpl = "%s%s"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)'
% (len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f%s %s" % (num, size_qualifier, x)
num /= 1024.0
return "%3.1f%s %s" % (num, size_qualifier, 'PB')
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not merely None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
lines.append('dtypes: %s' % ', '.join(dtypes))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if 'object' in counts or is_object_dtype(self.index):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: %s\n" %
_sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""Memory usage of DataFrame columns.
Parameters
----------
index : bool
Specifies whether to include memory usage of the DataFrame's
index in the returned Series. If ``index=True`` (the default),
the first entry of the returned Series is the memory usage of the
index, labelled 'Index'.
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
sizes : Series
A series with column names as index and memory usage of
columns with units of bytes.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
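Examples
--------
Illustrative sketch; exact byte counts depend on the platform:
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2, 3]})
>>> usage = df.memory_usage(index=True)
>>> list(usage.index)
['Index', 'a']
>>> int(usage['a']) == df['a'].values.nbytes
True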
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
def transpose(self, *args, **kwargs):
"""Transpose index and columns"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = _unpickle_array(cols)
index = _unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
index = _unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=_unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
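Examples
--------
A minimal sketch with hypothetical labels and values:
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [10, 20]}, index=['x', 'y'])
>>> df.get_value('y', 'a')
20
>>> df.get_value(1, 0, takeable=True)
20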
"""
if takeable:
series = self._iget_item_cache(col)
return _maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
return engine.get_value(series.get_values(), index)
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
try:
if takeable is True:
series = self._iget_item_cache(col)
return series.set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
def irow(self, i, copy=False):
"""
DEPRECATED. Use ``.iloc[i]`` instead
"""
warnings.warn("irow(i) is deprecated. Please use .iloc[i]",
FutureWarning, stacklevel=2)
return self._ixs(i, axis=0)
def icol(self, i):
"""
DEPRECATED. Use ``.iloc[:, i]`` instead
"""
warnings.warn("icol(i) is deprecated. Please use .iloc[:,i]",
FutureWarning, stacklevel=2)
return self._ixs(i, axis=1)
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.ix[:, lab_slice]
else:
if isinstance(label, Index):
return self.take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (i.e. a value was not found), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced.from_array(values,
index=self.index,
name=label,
fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def iget_value(self, i, j):
"""
DEPRECATED. Use ``.iat[i, j]`` instead
"""
warnings.warn("iget_value(i, j) is deprecated. Please use .iat[i, j]",
FutureWarning, stacklevel=2)
return self.iat[i, j]
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self.take(indexer, axis=0, convert=False)
else:
indexer = self.ix._convert_to_indexer(key, axis=1)
return self.take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
if len(result.columns) == 1:
top = result.columns[0]
if ((type(top) == str and top == '') or
(type(top) == tuple and top[0] == '')):
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
.. versionadded:: 0.13
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=None, **kwargs):
"""Evaluate an expression in the context of the calling DataFrame
instance.
Parameters
----------
expr : string
The expression string to evaluate.
inplace : bool
If the expression contains an assignment, whether to return a new
DataFrame or mutate the existing.
WARNING: inplace=None currently falls back to True, but
in a future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ret : ndarray, scalar, or pandas object
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.assign
pandas.eval
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of a DataFrame including/excluding columns based on
their ``dtype``.
Parameters
----------
include, exclude : list-like
A list of dtypes or strings to be included/excluded. You must pass
in a non-empty sequence for at least one of these.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
TypeError
* If either of ``include`` or ``exclude`` is not a sequence
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types use the numpy dtype ``numpy.number``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select Pandas categorical dtypes, use 'category'
Examples
--------
>>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 0.3962 True 1
1 0.1459 False 2
2 0.2623 True 1
3 0.0764 False 2
4 -0.9703 True 1
5 -1.2094 False 2
>>> df.select_dtypes(include=['float64'])
c
0 1
1 2
2 1
3 2
4 1
5 2
>>> df.select_dtypes(exclude=['floating'])
b
0 True
1 False
2 True
3 False
4 True
5 False
"""
include, exclude = include or (), exclude or ()
if not (is_list_like(include) and is_list_like(exclude)):
raise TypeError('include and exclude must both be non-string'
' sequences')
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
_invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s' %
(include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced.from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(key, DataFrame):
self._setitem_frame(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.ix._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.ix._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.ix._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.ix._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.size and not is_bool_dtype(key.values):
raise TypeError('Must pass DataFrame with boolean values only')
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
If `allow_duplicates` is False, raises Exception if column
is already contained in the DataFrame.
Parameters
----------
loc : int
Must have 0 <= loc <= len(columns)
column : object
value : scalar, Series, or array-like
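Examples
--------
Illustrative only (the column name and values are hypothetical):
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2], 'c': [5, 6]})
>>> df.insert(1, 'b', [3, 4])
>>> list(df.columns)
['a', 'b', 'c']
>>> df['b'].tolist()
[3, 4]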
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
.. versionadded:: 0.16.0
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
            assigned to the new columns. The callable must not
            change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your
        arguments may not be preserved. To make things predictable,
the columns are inserted in alphabetical order, at the end of
your DataFrame. Assigning multiple columns within the same
``assign`` is possible, but you cannot reference other columns
created within the same ``assign`` call.
Examples
--------
>>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
            A         B      ln_A
        0   1  0.426905  0.000000
        1   2 -0.780949  0.693147
        2   3 -0.418711  1.098612
        3   4 -0.269708  1.386294
        4   5 -0.274002  1.609438
        5   6 -0.500792  1.791759
        6   7  1.649697  1.945910
        7   8 -1.495604  2.079442
        8   9  0.549296  2.197225
        9  10 -0.758542  2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
            A         B      ln_A
        0   1  0.426905  0.000000
        1   2 -0.780949  0.693147
        2   3 -0.418711  1.098612
        3   4 -0.269708  1.386294
        4   5 -0.274002  1.609438
        5   6 -0.500792  1.791759
        6   7  1.649697  1.945910
        7   8 -1.495604  2.079442
        8   9  0.549296  2.197225
        9  10 -0.758542  2.302585
"""
data = self.copy()
# do all calculations first...
results = {}
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
# ... and then assign
for k, v in sorted(results.items()):
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex_axis(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = _possibly_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = _possibly_infer_to_datetimelike(value)
else:
# upcast the scalar
dtype, value = _infer_dtype_from_scalar(value)
value = np.repeat(value, len(self.index)).astype(dtype)
value = _possibly_cast_to_datetime(value, dtype)
# return internal types directly
if is_extension_type(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
        Returns
        -------
values : ndarray
The found values
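        Examples
        --------
        A small, made-up illustration (assuming ``import pandas as pd``):
        >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
        >>> df.lookup([0, 2], ['a', 'b'])
        array([1, 6])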
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self.get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, copy, level, fill_value,
limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method, level,
limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, level=level,
limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algos.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).reindex(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).rename(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> indexed_df = df.set_index(['A', 'B'])
>>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]])
>>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]])
Returns
-------
dataframe : DataFrame
"""
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index.get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col.get_level_values(n))
level = col.get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col._values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = MultiIndex.from_arrays(arrays, names=names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
        reset : DataFrame
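        Examples
        --------
        A minimal sketch (the frame below is purely illustrative):
        >>> df = pd.DataFrame({'a': [1, 2]},
        ...                   index=pd.Index(['x', 'y'], name='k'))
        >>> df.reset_index()
           k  a
        0  x  1
        1  y  2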
"""
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
values = values.take(labels)
if mask.any():
values, changed = _maybe_upcast_putmask(values, mask,
np.nan)
return values
new_index = _default_index(len(new_obj))
if isinstance(self.index, MultiIndex):
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if not drop:
names = self.index.names
zipped = lzip(self.index.levels, self.index.labels)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(zipped))):
col_name = names[i]
if col_name is None:
col_name = 'level_%d' % i
if multi_col:
if col_fill is None:
col_name = tuple([col_name] * self.columns.nlevels)
else:
name_lst = [col_fill] * self.columns.nlevels
lev_num = self.columns._get_level_number(col_level)
name_lst[lev_num] = col_name
col_name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
if level is None or i in level:
new_obj.insert(0, col_name, level_values)
elif not drop:
name = self.index.name
if name is None or name == 'index':
name = 'index' if 'index' not in self else 'level_0'
if isinstance(self.columns, MultiIndex):
if col_fill is None:
name = tuple([name] * self.columns.nlevels)
else:
name_lst = [col_fill] * self.columns.nlevels
lev_num = self.columns._get_level_number(col_level)
name_lst[lev_num] = name
name = tuple(name_lst)
values = _maybe_casted_values(self.index)
new_obj.insert(0, name, values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Return object with labels on given axis omitted where alternately any
or all of the data are missing
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
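        Examples
        --------
        A minimal sketch (values are made up; assumes ``import numpy as np``
        for the NaN):
        >>> df = pd.DataFrame({'a': [1.0, np.nan], 'b': [2.0, 3.0]})
        >>> df.dropna()
             a    b
        0  1.0  2.0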
"""
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self.take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
take_last : deprecated
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
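        Examples
        --------
        A small, made-up illustration:
        >>> df = pd.DataFrame({'a': [1, 1, 2]})
        >>> df.drop_duplicates()
           a
        0  1
        2  2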
"""
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
take_last : deprecated
Returns
-------
duplicated : Series
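        Examples
        --------
        A small, made-up illustration:
        >>> df = pd.DataFrame({'a': [1, 1, 2]})
        >>> df.duplicated()
        0    False
        1     True
        2    False
        dtype: bool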
"""
from pandas.core.groupby import get_group_index
from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algos.factorize(vals,
size_hint=min(len(self),
_SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
vals = (self[col].values for col in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
axis = self._get_axis_number(axis)
other_axis = 0 if axis == 1 else 1
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.groupby import _lexsort_indexer
def trans(v):
if needs_i8_conversion(v):
return v.view('i8')
return v
keys = []
for x in by:
k = self.xs(x, axis=other_axis).values
if k.ndim == 2:
raise ValueError('Cannot sort by duplicate column %s' %
str(x))
keys.append(trans(k))
indexer = _lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
else:
from pandas.core.groupby import _nargsort
by = by[0]
k = self.xs(by, axis=other_axis).values
if k.ndim == 2:
# try to be helpful
if isinstance(self.columns, MultiIndex):
                    raise ValueError('Cannot sort by column %s in a '
                                     'multi-index; you need to explicitly '
                                     'provide all the levels' % str(by))
raise ValueError('Cannot sort by duplicate column %s' %
str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = _nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sort(self, columns=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last', **kwargs):
"""
DEPRECATED: use :meth:`DataFrame.sort_values`
Sort DataFrame either by labels (along either axis) or by the values in
column(s)
Parameters
----------
columns : object
Column name(s) in frame. Accepts a column name or a list
for a nested sort. A tuple will be interpreted as the
levels of a multi-index.
ascending : boolean or list, default True
Sort ascending vs. descending. Specify list for multiple sort
orders
axis : {0 or 'index', 1 or 'columns'}, default 0
Sort index/rows versus columns
inplace : boolean, default False
Sort the DataFrame without creating a new instance
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
This option is only applied when sorting on a single column or
label.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Examples
--------
>>> result = df.sort(['A', 'B'], ascending=[1, 0])
Returns
-------
sorted : DataFrame
"""
nv.validate_sort(tuple(), kwargs)
if columns is None:
warnings.warn("sort(....) is deprecated, use sort_index(.....)",
FutureWarning, stacklevel=2)
return self.sort_index(axis=axis, ascending=ascending,
inplace=inplace)
warnings.warn("sort(columns=....) is deprecated, use "
"sort_values(by=.....)", FutureWarning, stacklevel=2)
return self.sort_values(by=columns, axis=axis, ascending=ascending,
inplace=inplace, kind=kind,
na_position=na_position)
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, pls use "
".sort_values(by=...)", FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# sort by the index
if level is not None:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.groupby import _lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
if not labels.is_lexsorted():
labels = MultiIndex.from_tuples(labels.values)
indexer = _lexsort_indexer(labels.labels, orders=ascending,
na_position=na_position)
else:
from pandas.core.groupby import _nargsort
# GH11080 - Check monotonic-ness before sort an index
# if monotonic (already sorted), return None or copy() according
# to 'inplace'
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = _nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nlargest(3, 'a')
            a  b    c
        3  11  c    3
        1  10  b    2
        2   8  d  NaN
"""
return algos.select_n_frame(self, columns, n, 'nlargest', keep)
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nsmallest(3, 'a')
            a  b    c
        4  -1  e    4
        0   1  a    1
        2   8  d  NaN
"""
return algos.select_n_frame(self, columns, n, 'nsmallest', keep)
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
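        Examples
        --------
        A minimal sketch (the two-level index below is made up):
        >>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])
        >>> df = pd.DataFrame({'x': [10, 20]}, index=idx)
        >>> df.swaplevel(0, 1).index.tolist()
        [(1, 'a'), (2, 'b')]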
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
if fill_value is not None:
left_mask = isnull(left)
right_mask = isnull(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# unique
if this.columns.is_unique:
def f(col):
r = _arith_op(this[col].values, other[col].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([(col, f(col)) for col in this])
# non-unique
else:
def f(i):
r = _arith_op(this.iloc[:, i].values,
other.iloc[:, i].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([
(i, f(i)) for i, col in enumerate(this.columns)
])
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index, columns=new_columns,
copy=False)
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level,
fill_value=fill_value)
else:
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
return self._combine_series_infer(other, func, level=level,
fill_value=fill_value)
def _combine_series_infer(self, other, func, level=None, fill_value=None):
if len(other) == 0:
return self * NA
if len(self) == 0:
# Ambiguous case, use _series so works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
def _combine_match_index(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index])
return self._constructor(new_data)
def _combine_const(self, other, func, raise_on_error=True):
if self.empty:
return self
new_data = self._data.eval(func=func, other=other,
raise_on_error=raise_on_error)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep):
# unique
if self.columns.is_unique:
def _compare(a, b):
return dict([(col, func(a[col], b[col])) for col in a.columns])
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))
for i, col in enumerate(a.columns)])
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def _compare_frame(self, other, func, str_rep):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep)
def _flex_compare_frame(self, other, func, str_rep, level):
if not self._indexed_same(other):
self, other = self.align(other, 'outer', level=level, copy=False)
return self._compare_frame_evaluate(other, func, str_rep)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
        Combine two DataFrame objects column-by-column using ``func``, without
        propagating NaN values: if one frame is missing a value for a given
        (column, index) location, it will default to the other frame's value
        (which might be NaN as well)
Parameters
----------
other : DataFrame
func : function
fill_value : scalar value
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
Returns
-------
result : DataFrame
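        Examples
        --------
        A small sketch that keeps, per column, the Series with the smaller
        sum (the frames below are illustrative only):
        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
           A  B
        0  0  3
        1  0  3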
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isnull(series)
other_mask = isnull(otherSeries)
            # don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
            # if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = _find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion_i = needs_i8_conversion(new_dtype)
if needs_i8_conversion_i:
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
if do_fill:
arr = _ensure_float(arr)
arr[this_mask & other_mask] = NA
# try to downcast back to the original dtype
if needs_i8_conversion_i:
                # TODO: this conversion should be handled in
                # _possibly_cast_to_datetime, but the change affects a lot...
if is_datetime64tz_dtype(new_dtype):
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
else:
arr = _possibly_cast_to_datetime(arr, new_dtype)
else:
arr = _possibly_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
def combine_first(self, other):
"""
        Combine two DataFrame objects and default to non-null values in the
        frame calling the method. The result's index and columns will be the
        union of the respective indexes and columns.
Parameters
----------
other : DataFrame
Examples
--------
        a's values are prioritized; values from b are used to fill holes:
>>> a.combine_first(b)
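        For instance, with two small, made-up frames (``np`` is numpy):
        >>> a = pd.DataFrame({'x': [1.0, np.nan]})
        >>> b = pd.DataFrame({'x': [3.0, 4.0]})
        >>> a.combine_first(b)
             x
        0  1.0
        1  4.0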
Returns
-------
combined : DataFrame
"""
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isnull(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isnull(x_values)
return expressions.where(mask, y_values, x_values,
raise_on_error=True)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify DataFrame in place using non-NA values from passed
DataFrame. Aligns on indices
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
join : {'left'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : boolean
If True, will raise an error if the DataFrame and other both
contain data in the same place.
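        Examples
        --------
        A minimal sketch (both frames are illustrative only); only the
        shared column ``B`` is updated:
        >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
        >>> new_df = pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]})
        >>> df.update(new_df)
        >>> df
           A  B
        0  1  4
        1  2  5
        2  3  6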
"""
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isnull(that)
else:
if raise_conflict:
mask_this = notnull(that)
mask_that = notnull(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isnull(that)
                    # don't overwrite columns unnecessarily
if mask.all():
continue
else:
mask = notnull(this)
self[col] = expressions.where(mask, this, that,
raise_on_error=True)
# ----------------------------------------------------------------------
# Misc methods
def first_valid_index(self):
"""
Return label for first non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][0]
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][-1]
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from index / columns to form axes of the resulting
DataFrame.
Parameters
----------
index : string or object, optional
Column name to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
Column name to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns
Returns
-------
pivoted : DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
DataFrame.unstack : pivot based on the index values instead of a
column
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods
Examples
--------
>>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6]})
>>> df
           foo  bar  baz
        0  one    A    1
        1  one    B    2
        2  one    C    3
        3  two    A    4
        4  two    B    5
        5  two    C    6
>>> df.pivot(index='foo', columns='bar', values='baz')
             A  B  C
        one  1  2  3
        two  4  5  6
>>> df.pivot(index='foo', columns='bar')['baz']
             A  B  C
        one  1  2  3
        two  4  5  6
"""
from pandas.core.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
def stack(self, level=-1, dropna=True):
"""
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to stack, can pass level name
dropna : boolean, default True
Whether to drop rows in the resulting Frame/Series with no valid
values
Examples
        --------
>>> s
              a   b
        one  1.  2.
        two  3.  4.
>>> s.stack()
        one  a    1
             b    2
        two  a    3
             b    4
Returns
-------
stacked : DataFrame or Series
"""
from pandas.core.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
            .. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
        one  a    1.0
             b    2.0
        two  a    3.0
             b    4.0
        dtype: float64
>>> s.unstack(level=-1)
               a    b
        one  1.0  2.0
        two  3.0  4.0
>>> s.unstack(level=0)
           one  two
        a  1.0  3.0
        b  2.0  4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
        one  a    1.0
             b    2.0
        two  a    3.0
             b    4.0
        dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape import unstack
return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
            .. versionadded:: 0.16.1
Returns
-------
diffed : DataFrame
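        Examples
        --------
        A minimal sketch (values are made up):
        >>> df = pd.DataFrame({'a': [1, 3, 6]})
        >>> df.diff()
             a
        0  NaN
        1  2.0
        2  3.0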
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""
Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
either the DataFrame's index (axis=0) or the columns (axis=1).
Return type depends on whether passed function aggregates, or the
reduce argument if the DataFrame is empty.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index': apply function to each column
* 1 or 'columns': apply function to each row
broadcast : boolean, default False
For aggregation functions, return object of same size with values
propagated
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
just applying a NumPy reduction function this will achieve much
better performance
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
            return value will be guessed by calling func on an empty Series
            (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
args : tuple
Positional arguments to pass to function in addition to the
array/series
Additional keyword arguments will be passed as keywords to the function
Notes
-----
In the current implementation apply calls func twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df.apply(numpy.sqrt) # returns DataFrame
>>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
>>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
See also
--------
DataFrame.applymap: For elementwise operations
Returns
-------
applied : Series or DataFrame
"""
axis = self._get_axis_number(axis)
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
if len(self.columns) == 0 and len(self.index) == 0:
return self._apply_empty_result(func, axis, reduce, *args, **kwds)
if isinstance(f, np.ufunc):
with np.errstate(all='ignore'):
results = f(self.values)
return self._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
else:
if not broadcast:
if not all(self.shape):
return self._apply_empty_result(func, axis, reduce, *args,
**kwds)
if raw and not self._is_mixed_type:
return self._apply_raw(f, axis)
else:
if reduce is None:
reduce = True
return self._apply_standard(f, axis, reduce=reduce)
else:
return self._apply_broadcast(f, axis)
def _apply_empty_result(self, func, axis, reduce, *args, **kwds):
if reduce is None:
reduce = False
try:
reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),
Series)
except Exception:
pass
if reduce:
return Series(NA, index=self._get_agg_axis(axis))
else:
return self.copy()
def _apply_raw(self, func, axis):
try:
result = lib.reduce(self.values, func, axis=axis)
except Exception:
result = np.apply_along_axis(func, axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return DataFrame(result, index=self.index, columns=self.columns)
else:
return Series(result, index=self._get_agg_axis(axis))
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if (reduce and axis == 1 and self._is_mixed_type and
self._is_datelike_mixed_type):
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_extension_type(values):
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis),
dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy,
labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1)
for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (Series.from_array(arr, index=res_columns, name=name,
dtype=dtype)
for i, (arr, name) in enumerate(zip(values,
res_index)))
else: # pragma : no cover
raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
def _apply_broadcast(self, func, axis):
if axis == 0:
target = self
elif axis == 1:
target = self.T
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % axis)
result_values = np.empty_like(target.values)
columns = target.columns
for i, col in enumerate(columns):
result_values[:, i] = func(target[col])
result = self._constructor(result_values, index=target.index,
columns=target.columns)
if axis == 1:
result = result.T
return result
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Examples
--------
>>> df = pd.DataFrame(np.random.randn(3, 3))
>>> df
                  0         1         2
        0 -0.029638  1.081563  1.280300
        1  0.647747  0.831136 -1.549481
        2  0.513416 -0.884417  0.195343
>>> df = df.applymap(lambda x: '%.2f' % x)
>>> df
               0      1      2
        0  -0.03   1.08   1.28
        1   0.65   0.83  -1.55
        2   0.51  -0.88   0.20
Returns
-------
applied : DataFrame
See also
--------
DataFrame.apply : For operations on rows/columns
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
return lib.map_infer(x.asobject, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
           A  B
        0  1  2
        1  3  4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
           A  B
        0  1  2
        1  3  4
        0  5  6
        1  7  8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
           A  B
        0  1  2
        1  3  4
        2  5  6
        3  7  8
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.ix[:, self.columns]
from pandas.tools.merge import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
        column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : column name, tuple/list of column names, or array-like
Column(s) in the caller to join on the index in other,
            otherwise joins index-on-index. If multiple
            columns are given, the passed DataFrame must have a MultiIndex. Can
pass an array as the join key if not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
preserves the index order of the calling (left) DataFrame
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
            A  key
        0  A0   K0
        1  A1   K1
        2  A2   K2
        3  A3   K3
        4  A4   K4
        5  A5   K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
            B  key
        0  B0   K0
        1  B1   K1
        2  B2   K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
            A  key_caller    B  key_other
        0  A0          K0   B0         K0
        1  A1          K1   B1         K1
        2  A2          K2   B2         K2
        3  A3          K3  NaN        NaN
        4  A4          K4  NaN        NaN
        5  A5          K5  NaN        NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
              A    B
        key
        K0   A0   B0
        K1   A1   B1
        K2   A2   B2
        K3   A3  NaN
        K4   A4  NaN
        K5   A5  NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
            A  key    B
        0  A0   K0   B0
        1  A1   K1   B1
        2  A2   K2   B2
        3  A3   K3  NaN
        4  A4   K4  NaN
        5  A5   K5  NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.tools.merge import merge, concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
# join indexes only using concat
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
if can_concat:
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False):
from pandas.tools.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
.. versionadded:: 0.17.0
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
                       A         B         C
        first   0.028208  0.992815  0.173891
        second  0.038683  0.645646  0.577595
        third   0.877076  0.149370  0.491027
>>> df.round(2)
                   A     B     C
        first   0.03  0.99  0.17
        second  0.04  0.65  0.58
        third   0.88  0.15  0.49
>>> df.round({'A': 1, 'C': 2})
                  A         B     C
        first   0.0  0.992815  0.17
        second  0.0  0.645646  0.58
        third   0.9  0.149370  0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
                  A  B     C
        first   0.0  1  0.17
        second  0.0  1  0.58
        third   0.9  0  0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.tools.merge import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
mat = numeric_df.values
if method == 'pearson':
correl = _algos.nancorr(_ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = _algos.nancorr_spearman(_ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = _ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = NA
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=cols, columns=cols)
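    # Hedged usage sketch (illustrative comments only, not part of the pandas
    # source; assumes the usual `import pandas as pd` alias):
    #   df = pd.DataFrame({'a': [1., 2., 3., 4.], 'b': [4., 3., 2., 1.]})
    #   df.corr()                                  # Pearson by default
    #   df.corr(method='spearman', min_periods=3)  # rank-based correlation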
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
y : DataFrame
Notes
-----
`y` contains the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1 (unbiased estimator).
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
mat = numeric_df.values
if notnull(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = _algos.nancorr(_ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=cols, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
"""
axis = self._get_axis_number(axis)
if isinstance(other, Series):
return self.apply(other.corr, axis=axis)
this = self._get_numeric_data()
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Return Series with number of non-NA/null observations over requested
axis. Works with non-floating point data as well (detects NaN and None)
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame
numeric_only : boolean, default False
Include only float, int, boolean data
Returns
-------
count : Series (or DataFrame if level specified)
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type:
result = notnull(frame).sum(axis=axis)
else:
counts = notnull(frame.values).sum(axis=axis)
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
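    # Hedged usage sketch (illustrative comments only, not part of the pandas
    # source; assumes the usual `pd`/`np` aliases):
    #   df = pd.DataFrame({'a': [1, np.nan, 3], 'b': ['x', None, 'z']})
    #   df.count()          # non-null count per column: a -> 2, b -> 2
    #   df.count(axis=1)    # non-null count per row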
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical %s." %
self._get_axis_name(axis))
if frame._is_mixed_type:
# Since we have mixed types, calling notnull(frame.values) might
# upcast everything to object
mask = notnull(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notnull(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = _ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
result = self.apply(f, reduce=False)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
"type %s not implemented." %
filter_type)
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type %s"
"not supported." % filter_type)
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notnull(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = _coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
            will be NA.
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
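    # Hedged usage sketch (illustrative comments only, not part of the pandas
    # source; assumes the usual `import pandas as pd` alias):
    #   df = pd.DataFrame({'a': [1, 3, 2], 'b': [9, 1, 5]}, index=['x', 'y', 'z'])
    #   df.idxmin()   # a -> 'x', b -> 'y'
    #   df.idxmax()   # a -> 'y', b -> 'x'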
def _get_agg_axis(self, axis_num):
""" let's be explict about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
"""
Gets the mode(s) of each element along the axis selected. Empty if
nothing has 2+ occurrences. Adds a row for each mode per label, fills
in gaps with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item share the maximum frequency), which is
the reason why a dataframe is returned. If you want to impute missing
values with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode()
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
        ...               columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
        freq : string, optional
            Desired frequency; by default it is inferred from the index
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
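    # Hedged usage sketch (illustrative comments only, not part of the pandas
    # source; assumes the usual `import pandas as pd` alias):
    #   df = pd.DataFrame({'v': range(3)},
    #                     index=pd.date_range('2015-01-01', periods=3, freq='M'))
    #   pdf = df.to_period()         # monthly PeriodIndex inferred from the index
    #   pdf.to_timestamp(how='end')  # back to a DatetimeIndex at period ends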
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(other)
A B
0 True False
1 False False # Column A in `other` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from collections import defaultdict
from pandas.tools.merge import concat
values = defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(lib.ismember(self.values.ravel(),
set(values)).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
# Deprecated stuff
def combineAdd(self, other):
"""
DEPRECATED. Use ``DataFrame.add(other, fill_value=0.)`` instead.
Add two DataFrame objects and do not propagate
NaN values, so if for a (column, time) one frame is missing a
value, it will default to the other frame's value (which might
be NaN as well)
Parameters
----------
other : DataFrame
Returns
-------
DataFrame
See also
--------
DataFrame.add
"""
warnings.warn("'combineAdd' is deprecated. Use "
"'DataFrame.add(other, fill_value=0.)' instead",
FutureWarning, stacklevel=2)
return self.add(other, fill_value=0.)
def combineMult(self, other):
"""
DEPRECATED. Use ``DataFrame.mul(other, fill_value=1.)`` instead.
Multiply two DataFrame objects and do not propagate NaN values, so if
for a (column, time) one frame is missing a value, it will default to
the other frame's value (which might be NaN as well)
Parameters
----------
other : DataFrame
Returns
-------
DataFrame
See also
--------
DataFrame.mul
"""
warnings.warn("'combineMult' is deprecated. Use "
"'DataFrame.mul(other, fill_value=1.)' instead",
FutureWarning, stacklevel=2)
return self.mul(other, fill_value=1.)
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
_EMPTY_SERIES = Series([])
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = _default_index(lengths[0])
return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return _possibly_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = _default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = _default_index(len(data))
index = _ensure_index(index)
if columns is not None:
columns = _ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(arr_columns).get_indexer(columns)
arr_columns = _ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_combined_index
if columns is None:
columns = _get_combined_index([
s.index for s in data if getattr(s, 'index', None) is not None
])
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = _default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = _values_from_object(s)
aligned_values.append(algos.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('%d columns passed, passed data had %s '
'columns' % (len(columns), len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = _possibly_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any([getattr(s, 'name', None) is not None for s in data])
if not has_some_name:
return _default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = _dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=NA)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
DataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods,
gfx.FramePlotMethods)
DataFrame.hist = gfx.hist_frame
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None, **kwds):
import pandas.tools.plotting as plots
import matplotlib.pyplot as plt
ax = plots.boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
grid=grid, rot=rot, figsize=figsize, layout=layout,
return_type=return_type, **kwds)
plt.draw_if_interactive()
return ax
DataFrame.boxplot = boxplot
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
gpl-3.0
|
cbmoore/statsmodels
|
statsmodels/graphics/tsaplots.py
|
16
|
10392
|
"""Correlation plot functions."""
import numpy as np
from statsmodels.graphics import utils
from statsmodels.tsa.stattools import acf, pacf
def plot_acf(x, ax=None, lags=None, alpha=.05, use_vlines=True, unbiased=False,
fft=False, **kwargs):
"""Plot the autocorrelation function
Plots lags on the horizontal and the correlations on vertical axis.
Parameters
----------
x : array_like
Array of time-series values
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
lags : array_like, optional
Array of lag values, used on horizontal axis.
If not given, ``lags=np.arange(len(corr))`` is used.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett's formula. If None, no confidence intervals are plotted.
use_vlines : bool, optional
If True, vertical lines and markers are plotted.
If False, only markers are plotted. The default marker is 'o'; it can
be overridden with a ``marker`` kwarg.
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
fft : bool, optional
If True, computes the ACF via FFT.
**kwargs : kwargs, optional
Optional keyword arguments that are directly passed on to the
Matplotlib ``plot`` and ``axhline`` functions.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
matplotlib.pyplot.xcorr
matplotlib.pyplot.acorr
mpl_examples/pylab_examples/xcorr_demo.py
Notes
-----
Adapted from matplotlib's `xcorr`.
Data are plotted as ``plot(lags, corr, **kwargs)``
"""
fig, ax = utils.create_mpl_ax(ax)
if lags is None:
lags = np.arange(len(x))
nlags = len(lags) - 1
else:
nlags = lags
lags = np.arange(lags + 1) # +1 for zero lag
confint = None
# acf has different return type based on alpha
if alpha is None:
acf_x = acf(x, nlags=nlags, alpha=alpha, fft=fft,
unbiased=unbiased)
else:
acf_x, confint = acf(x, nlags=nlags, alpha=alpha, fft=fft,
unbiased=unbiased)
if use_vlines:
ax.vlines(lags, [0], acf_x, **kwargs)
ax.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('markersize', 5)
kwargs.setdefault('linestyle', 'None')
ax.margins(.05)
ax.plot(lags, acf_x, **kwargs)
ax.set_title("Autocorrelation")
if confint is not None:
# center the confidence interval TODO: do in acf?
ax.fill_between(lags, confint[:,0] - acf_x, confint[:,1] - acf_x, alpha=.25)
return fig
def plot_pacf(x, ax=None, lags=None, alpha=.05, method='ywm',
use_vlines=True, **kwargs):
"""Plot the partial autocorrelation function
Plots lags on the horizontal and the correlations on vertical axis.
Parameters
----------
x : array_like
Array of time-series values
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
lags : array_like, optional
Array of lag values, used on horizontal axis.
If not given, ``lags=np.arange(len(corr))`` is used.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
    method : str, optional
        Specifies which method to use for the calculations
        (the default is 'ywm', Yule-Walker without bias correction):
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
use_vlines : bool, optional
If True, vertical lines and markers are plotted.
If False, only markers are plotted. The default marker is 'o'; it can
be overridden with a ``marker`` kwarg.
**kwargs : kwargs, optional
Optional keyword arguments that are directly passed on to the
Matplotlib ``plot`` and ``axhline`` functions.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
matplotlib.pyplot.xcorr
matplotlib.pyplot.acorr
mpl_examples/pylab_examples/xcorr_demo.py
Notes
-----
Adapted from matplotlib's `xcorr`.
Data are plotted as ``plot(lags, corr, **kwargs)``
"""
fig, ax = utils.create_mpl_ax(ax)
if lags is None:
lags = np.arange(len(x))
nlags = len(lags) - 1
else:
nlags = lags
lags = np.arange(lags + 1) # +1 for zero lag
confint = None
if alpha is None:
acf_x = pacf(x, nlags=nlags, alpha=alpha, method=method)
else:
acf_x, confint = pacf(x, nlags=nlags, alpha=alpha, method=method)
if use_vlines:
ax.vlines(lags, [0], acf_x, **kwargs)
ax.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('markersize', 5)
kwargs.setdefault('linestyle', 'None')
ax.margins(.05)
ax.plot(lags, acf_x, **kwargs)
ax.set_title("Partial Autocorrelation")
if confint is not None:
# center the confidence interval TODO: do in acf?
ax.fill_between(lags, confint[:,0] - acf_x, confint[:,1] - acf_x, alpha=.25)
return fig
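def _acf_pacf_demo(nobs=200, lags=30, seed=12345):
    """Hedged usage sketch, not part of the original module: plot the ACF and
    PACF of a white-noise series on a single figure. The function name, the
    default arguments and the use of ``plt.subplots`` are illustrative
    assumptions only.
    """
    import matplotlib.pyplot as plt
    np.random.seed(seed)
    x = np.random.randn(nobs)
    fig, axes = plt.subplots(2, 1)
    plot_acf(x, ax=axes[0], lags=lags)
    plot_pacf(x, ax=axes[1], lags=lags)
    fig.tight_layout()
    return fig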
def seasonal_plot(grouped_x, xticklabels, ylabel=None, ax=None):
"""
Consider using one of month_plot or quarter_plot unless you need
irregular plotting.
Parameters
----------
grouped_x : iterable of DataFrames
Should be a GroupBy object (or similar pair of group_names and groups
as DataFrames) with a DatetimeIndex or PeriodIndex
"""
fig, ax = utils.create_mpl_ax(ax)
start = 0
ticks = []
for season, df in grouped_x:
        df = df.copy()  # copy first: sorting in place balks for Series; there may be a better way
df.sort()
nobs = len(df)
x_plot = np.arange(start, start + nobs)
ticks.append(x_plot.mean())
ax.plot(x_plot, df.values, 'k')
ax.hlines(df.values.mean(), x_plot[0], x_plot[-1], colors='k')
start += nobs
ax.set_xticks(ticks)
ax.set_xticklabels(xticklabels)
ax.set_ylabel(ylabel)
ax.margins(.1, .05)
return fig
def month_plot(x, dates=None, ylabel=None, ax=None):
"""
Seasonal plot of monthly data
Parameters
----------
x : array-like
Seasonal data to plot. If dates is None, x must be a pandas object
with a PeriodIndex or DatetimeIndex with a monthly frequency.
dates : array-like, optional
If `x` is not a pandas object, then dates must be supplied.
ylabel : str, optional
The label for the y-axis. Will attempt to use the `name` attribute
of the Series.
ax : matplotlib.axes, optional
Existing axes instance.
Returns
-------
matplotlib.Figure
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.elnino.load_pandas().data
>>> dta['YEAR'] = dta.YEAR.astype(int).astype(str)
>>> dta = dta.set_index('YEAR').T.unstack()
>>> dates = map(lambda x : pd.datetools.parse('1 '+' '.join(x)),
... dta.index.values)
>>> dta.index = pd.DatetimeIndex(dates, freq='M')
>>> fig = sm.graphics.tsa.month_plot(dta)
.. plot:: plots/graphics_month_plot.py
"""
from pandas import DataFrame
if dates is None:
from statsmodels.tools.data import _check_period_index
_check_period_index(x, freq="M")
else:
from pandas import Series, PeriodIndex
x = Series(x, index=PeriodIndex(dates, freq="M"))
xticklabels = ['j','f','m','a','m','j','j','a','s','o','n','d']
return seasonal_plot(x.groupby(lambda y : y.month), xticklabels,
ylabel=ylabel, ax=ax)
def quarter_plot(x, dates=None, ylabel=None, ax=None):
"""
Seasonal plot of quarterly data
Parameters
----------
x : array-like
Seasonal data to plot. If dates is None, x must be a pandas object
        with a PeriodIndex or DatetimeIndex with a quarterly frequency.
dates : array-like, optional
If `x` is not a pandas object, then dates must be supplied.
ylabel : str, optional
The label for the y-axis. Will attempt to use the `name` attribute
of the Series.
ax : matplotlib.axes, optional
Existing axes instance.
Returns
-------
matplotlib.Figure
"""
from pandas import DataFrame
if dates is None:
from statsmodels.tools.data import _check_period_index
_check_period_index(x, freq="Q")
else:
from pandas import Series, PeriodIndex
x = Series(x, index=PeriodIndex(dates, freq="Q"))
xticklabels = ['q1', 'q2', 'q3', 'q4']
return seasonal_plot(x.groupby(lambda y : y.quarter), xticklabels,
ylabel=ylabel, ax=ax)
if __name__ == "__main__":
import pandas as pd
#R code to run to load that dataset in this directory
#data(co2)
#library(zoo)
#write.csv(as.data.frame(list(date=as.Date(co2), co2=coredata(co2))), "co2.csv", row.names=FALSE)
co2 = pd.read_csv("co2.csv", index_col=0, parse_dates=True)
month_plot(co2.co2)
#will work when dates are sorted
#co2 = sm.datasets.get_rdataset("co2", cache=True)
x = pd.Series(np.arange(20),
index=pd.PeriodIndex(start='1/1/1990', periods=20, freq='Q'))
quarter_plot(x)
|
bsd-3-clause
|
linsalrob/EdwardsLab
|
matplotlib graphs/3d_scatter_plot.py
|
1
|
1295
|
"""
Plot a 3D scatter plot
"""
import os
import random
import sys
import matplotlib.lines
import argparse
import matplotlib.colors
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Plot a 3D scatter plot")
    parser.add_argument('-f', help='tab separated data file', required=True)
parser.add_argument('-l', help='File has a header line with axis titles', action='store_true')
args = parser.parse_args()
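    # Assumed input format (inferred from the parsing below, not documented in
    # the original script): one record per line, tab separated, with four
    # columns -- label, x, y, z -- plus an optional header line of axis titles
    # when -l is given, e.g.
    #   name<TAB>x<TAB>y<TAB>z
    #   pointA<TAB>1.0<TAB>2.0<TAB>3.0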
titles = []
x = []
y = []
z = []
legends = ['what', 'x', 'y', 'z']
with open(args.f, 'r') as fin:
if args.l:
legends = fin.readline().strip().split("\t")
for l in fin:
p = l.strip().split("\t")
titles.append(p[0])
x.append(float(p[1]))
y.append(float(p[2]))
z.append(float(p[3]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z)
ax.legend()
ax.set_xlim(0, max(x))
ax.set_ylim(0, max(y))
ax.set_zlim(0, max(z))
ax.set_xlabel(legends[1])
ax.set_ylabel(legends[2])
ax.set_zlabel(legends[3])
pickle.dump(fig, open('/home/redwards/Desktop/3dfig.pickle', 'wb'))
plt.show()
|
mit
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/skimage/viewer/canvastools/recttool.py
|
43
|
8886
|
from matplotlib.widgets import RectangleSelector
from ...viewer.canvastools.base import CanvasToolBase
from ...viewer.canvastools.base import ToolHandles
__all__ = ['RectangleTool']
class RectangleTool(CanvasToolBase, RectangleSelector):
"""Widget for selecting a rectangular region in a plot.
After making the desired selection, press "Enter" to accept the selection
and call the `on_enter` callback function.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the rectangle extents as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
rect_props : dict
Properties for :class:`matplotlib.patches.Rectangle`. This class
redefines defaults in :class:`matplotlib.widgets.RectangleSelector`.
Attributes
----------
extents : tuple
Rectangle extents: (xmin, xmax, ymin, ymax).
Examples
    --------
>>> from skimage import data
>>> from skimage.viewer import ImageViewer
>>> from skimage.viewer.canvastools import RectangleTool
>>> from skimage.draw import line
>>> from skimage.draw import set_color
>>> viewer = ImageViewer(data.coffee()) # doctest: +SKIP
>>> def print_the_rect(extents):
... global viewer
... im = viewer.image
... coord = np.int64(extents)
... [rr1, cc1] = line(coord[2],coord[0],coord[2],coord[1])
... [rr2, cc2] = line(coord[2],coord[1],coord[3],coord[1])
... [rr3, cc3] = line(coord[3],coord[1],coord[3],coord[0])
... [rr4, cc4] = line(coord[3],coord[0],coord[2],coord[0])
... set_color(im, (rr1, cc1), [255, 255, 0])
... set_color(im, (rr2, cc2), [0, 255, 255])
... set_color(im, (rr3, cc3), [255, 0, 255])
... set_color(im, (rr4, cc4), [0, 0, 0])
... viewer.image=im
>>> rect_tool = RectangleTool(viewer, on_enter=print_the_rect) # doctest: +SKIP
>>> viewer.show() # doctest: +SKIP
"""
def __init__(self, manager, on_move=None, on_release=None, on_enter=None,
maxdist=10, rect_props=None):
self._rect = None
props = dict(edgecolor=None, facecolor='r', alpha=0.15)
props.update(rect_props if rect_props is not None else {})
if props['edgecolor'] is None:
props['edgecolor'] = props['facecolor']
RectangleSelector.__init__(self, manager.ax, lambda *args: None,
rectprops=props)
CanvasToolBase.__init__(self, manager, on_move=on_move,
on_enter=on_enter, on_release=on_release)
# Events are handled by the viewer
try:
self.disconnect_events()
except AttributeError:
# disconnect the events manually (hack for older mpl versions)
[self.canvas.mpl_disconnect(i) for i in range(10)]
# Alias rectangle attribute, which is initialized in RectangleSelector.
self._rect = self.to_draw
self._rect.set_animated(True)
self.maxdist = maxdist
self.active_handle = None
self._extents_on_press = None
if on_enter is None:
def on_enter(extents):
print("(xmin=%.3g, xmax=%.3g, ymin=%.3g, ymax=%.3g)" % extents)
self.callback_on_enter = on_enter
props = dict(mec=props['edgecolor'])
self._corner_order = ['NW', 'NE', 'SE', 'SW']
xc, yc = self.corners
self._corner_handles = ToolHandles(self.ax, xc, yc, marker_props=props)
self._edge_order = ['W', 'N', 'E', 'S']
xe, ye = self.edge_centers
self._edge_handles = ToolHandles(self.ax, xe, ye, marker='s',
marker_props=props)
self.artists = [self._rect,
self._corner_handles.artist,
self._edge_handles.artist]
self.manager.add_tool(self)
@property
def _rect_bbox(self):
if not self._rect:
return 0, 0, 0, 0
x0 = self._rect.get_x()
y0 = self._rect.get_y()
width = self._rect.get_width()
height = self._rect.get_height()
return x0, y0, width, height
@property
def corners(self):
"""Corners of rectangle from lower left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
xc = x0, x0 + width, x0 + width, x0
yc = y0, y0, y0 + height, y0 + height
return xc, yc
@property
def edge_centers(self):
"""Midpoint of rectangle edges from left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
w = width / 2.
h = height / 2.
xe = x0, x0 + w, x0 + width, x0 + w
ye = y0 + h, y0, y0 + h, y0 + height
return xe, ye
@property
def extents(self):
"""Return (xmin, xmax, ymin, ymax)."""
x0, y0, width, height = self._rect_bbox
xmin, xmax = sorted([x0, x0 + width])
ymin, ymax = sorted([y0, y0 + height])
return xmin, xmax, ymin, ymax
@extents.setter
def extents(self, extents):
x1, x2, y1, y2 = extents
xmin, xmax = sorted([x1, x2])
ymin, ymax = sorted([y1, y2])
# Update displayed rectangle
self._rect.set_x(xmin)
self._rect.set_y(ymin)
self._rect.set_width(xmax - xmin)
self._rect.set_height(ymax - ymin)
# Update displayed handles
self._corner_handles.set_data(*self.corners)
self._edge_handles.set_data(*self.edge_centers)
self.set_visible(True)
self.redraw()
def on_mouse_release(self, event):
if event.button != 1:
return
if not self.ax.in_axes(event):
self.eventpress = None
return
RectangleSelector.release(self, event)
self._extents_on_press = None
# Undo hiding of rectangle and redraw.
self.set_visible(True)
self.redraw()
self.callback_on_release(self.geometry)
def on_mouse_press(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return
self._set_active_handle(event)
if self.active_handle is None:
# Clear previous rectangle before drawing new rectangle.
self.set_visible(False)
self.redraw()
self.set_visible(True)
RectangleSelector.press(self, event)
def _set_active_handle(self, event):
"""Set active handle based on the location of the mouse event"""
# Note: event.xdata/ydata in data coordinates, event.x/y in pixels
c_idx, c_dist = self._corner_handles.closest(event.x, event.y)
e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
# Set active handle as closest handle, if mouse click is close enough.
if c_dist > self.maxdist and e_dist > self.maxdist:
self.active_handle = None
return
elif c_dist < e_dist:
self.active_handle = self._corner_order[c_idx]
else:
self.active_handle = self._edge_order[e_idx]
# Save coordinates of rectangle at the start of handle movement.
x1, x2, y1, y2 = self.extents
# Switch variables so that only x2 and/or y2 are updated on move.
if self.active_handle in ['W', 'SW', 'NW']:
x1, x2 = x2, event.xdata
if self.active_handle in ['N', 'NW', 'NE']:
y1, y2 = y2, event.ydata
self._extents_on_press = x1, x2, y1, y2
def on_move(self, event):
if self.eventpress is None or not self.ax.in_axes(event):
return
if self.active_handle is None:
# New rectangle
x1 = self.eventpress.xdata
y1 = self.eventpress.ydata
x2, y2 = event.xdata, event.ydata
else:
x1, x2, y1, y2 = self._extents_on_press
if self.active_handle in ['E', 'W'] + self._corner_order:
x2 = event.xdata
if self.active_handle in ['N', 'S'] + self._corner_order:
y2 = event.ydata
self.extents = (x1, x2, y1, y2)
self.callback_on_move(self.geometry)
@property
def geometry(self):
return self.extents
if __name__ == '__main__': # pragma: no cover
from ...viewer import ImageViewer
from ... import data
viewer = ImageViewer(data.camera())
rect_tool = RectangleTool(viewer)
viewer.show()
print("Final selection:")
rect_tool.callback_on_enter(rect_tool.extents)
|
gpl-3.0
|
rdhyee/working-open-data-2014
|
notebooks/Day_05_A_Geographical_Hierarchies.py
|
3
|
11920
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
# our usual pylab import
%pylab --no-import-all inline
# <headingcell level=1>
# Goal
# <markdowncell>
# For background, see [Mapping Census Data](http://www.udel.edu/johnmack/frec682/census/), including the
# [scan of the 10-question form](http://www.udel.edu/johnmack/frec682/census/census_form.png). Keep in mind what people were asked and the range of data available in the census.
#
# We will use the census API to get an understanding of some of the geographic entities in the **2010 census**. We'll specifically be using the variable `P0010001`, the total population.
#
# What you will do in this notebook:
#
# * Sum the population of the **states** (or state-like entity like DC) to get the total population of the **nation**
# * Add up the **counties** for each **state** and validate the sums
# * Add up the **census tracts** for each **county** and validate the sums
#
# We will make use of `pandas` in this notebook.
# <markdowncell>
# I often have the following [diagram](http://www.census.gov/geo/reference/pdfs/geodiagram.pdf) in mind to help understand the relationship among entities. Also use the [list of example URLs](http://api.census.gov/data/2010/sf1/geo.html) -- it'll come in handy.
# <markdowncell>
# <a href="http://www.flickr.com/photos/raymondyee/12297467734/" title="Census Geographic Hierarchies by Raymond Yee, on Flickr"><img src="http://farm4.staticflickr.com/3702/12297467734_af8882d310_c.jpg" width="618" height="800" alt="Census Geographic Hierarchies"></a>
# <headingcell level=1>
# Working out the geographical hierarchy for Cafe Milano
# <markdowncell>
# It's helpful to have a concrete instance of a place to work with, especially when dealing with rather intangible entities like census tracts, block groups, and blocks. You can use the [American FactFinder](http://factfinder2.census.gov/faces/nav/jsf/pages/index.xhtml) site to look up, for any given US address, the corresponding census geographies.
#
# Let's use Cafe Milano in Berkeley as an example. You can verify the following results by typing in the address into http://factfinder2.census.gov/faces/nav/jsf/pages/searchresults.xhtml?refresh=t.
#
# https://www.evernote.com/shard/s1/sh/dc0bfb96-4965-4fbf-bc28-c9d4d0080782/2bd8c92a045d62521723347d62fa2b9d
#
# 2522 Bancroft Way, BERKELEY, CA, 94704
#
# * State: California
# * County: Alameda County
# * County Subdivision: Berkeley CCD, Alameda County, California
# * Census Tract: Census Tract 4228, Alameda County, California
# * Block Group: Block Group 1, Census Tract 4228, Alameda County, California
# * Block: Block 1001, Block Group 1, Census Tract 4228, Alameda County, California
#
# <codecell>
# YouTube video I made on how to use the American Factfinder site to look up addresses
from IPython.display import YouTubeVideo
YouTubeVideo('HeXcliUx96Y')
# <codecell>
# standard numpy, pandas, matplotlib imports
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame, Series, Index
import pandas as pd
# <codecell>
# check that CENSUS_KEY is defined
import census
import us
import requests
import settings
assert settings.CENSUS_KEY is not None
# <markdowncell>
# The census documentation has example URLs but needs your API key to work. In this notebook, we'll use the IPython notebook HTML display mechanism to help out.
# <codecell>
c = census.Census(key=settings.CENSUS_KEY)
# <markdowncell>
# Note: we can use `c.sf1` to access 2010 census (SF1: Census Summary File 1 (2010, 2000, 1990) available in API -- 2010 is the default)
#
# see documentation: [sunlightlabs/census](https://github.com/sunlightlabs/census)
# <headingcell level=1>
# Summing up populations by state
# <markdowncell>
# Let's make a `DataFrame` named `states_df` with columns `NAME`, `P0010001` (for population), and `state` (to hold the FIPS code). **Make sure to exclude Puerto Rico.**
# <codecell>
# call the API and instantiate `df`
df = DataFrame(c.sf1.get('NAME,P0010001', geo={'for':'state:*'}))
# convert the population to integer
df['P0010001'] = df['P0010001'].astype(np.int)
df.head()
# <markdowncell>
# You can filter Puerto Rico (PR) in a number of ways -- use the way you're most comfortable with.
#
# Optional fun: filter PR in the following way
#
# * calculate a `np.array` holding the FIPS codes of the states
# * then use [numpy.in1d](http://docs.scipy.org/doc/numpy/reference/generated/numpy.in1d.html), which is analogous to the [in](http://stackoverflow.com/a/3437130/7782) operator, to test membership in a list
# <codecell>
## FILL IN
## calculate states_fips so that PR not included
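# <markdowncell>
# A minimal sketch of one possible solution (an illustrative assumption, not
# the only route -- the `np.in1d` approach suggested above also works): drop
# Puerto Rico by name and keep the remaining 51 rows.
# <codecell>
states_df = df[df.NAME != 'Puerto Rico'].copy()
states_df.head()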
# <markdowncell>
# If `states_df` is calculated properly, the following asserts will pass silently.
# <codecell>
# check that we have three columns
assert set(states_df.columns) == set((u'NAME', u'P0010001', u'state'))
# check that the total 2010 census population is correct
assert np.sum(states_df.P0010001) == 308745538
# check that the number of states+DC is 51
assert len(states_df) == 51
# <headingcell level=1>
# Counties
# <markdowncell>
# Looking at http://api.census.gov/data/2010/sf1/geo.html, we see
#
# state-county: http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*
#
# if we want to grab all counties in one go, or you can grab counties state-by-state:
#
# http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*&in=state:06
#
# for all counties in the state with FIPS code `06` (which is what state?)
# <codecell>
# Here's a way to use translate
# http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*
# into a call using the census.Census object
r = c.sf1.get('NAME,P0010001', geo={'for':'county:*'})
# ask yourself what len(r) means and what it should be
len(r)
# <codecell>
# let's try out one of the `census` object convenience methods
# instead of using `c.sf1.get`
r = c.sf1.state_county('NAME,P0010001',census.ALL,census.ALL)
r
# <codecell>
# convert the json from the API into a DataFrame
# coerce to integer the P0010001 column
df = DataFrame(r)
df['P0010001'] = df['P0010001'].astype('int')
# display the first records
df.head()
# <codecell>
# calculate the total population
# what happens when you google the number you get?
np.sum(df['P0010001'])
# <codecell>
# often you can use dot notation to access a DataFrame column
df.P0010001.head()
# <codecell>
## FILL IN
## compute counties_df
## counties_df should have same columns as df
## filter out PR -- what's the total population now
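# <markdowncell>
# A minimal sketch of one possible solution (an illustrative assumption):
# Puerto Rico's municipios carry state FIPS code '72', so drop them to make the
# county total match the state total.
# <codecell>
counties_df = df[df.state != '72'].copy()
len(counties_df), counties_df.P0010001.sum()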
# <markdowncell>
# Check properties of `counties_df`
# <codecell>
# number of counties
assert len(counties_df) == 3143 #3143 county/county-equivs in US
# <codecell>
# check that the total population by adding all counties == population by adding all states
assert np.sum(counties_df['P0010001']) == np.sum(states_df.P0010001)
# <codecell>
# check we have same columns between counties_df and df
set(counties_df.columns) == set(df.columns)
# <headingcell level=1>
# Using FIPS code as the Index
# <markdowncell>
# From [Mapping Census Data](http://www.udel.edu/johnmack/frec682/census/):
#
# * Each state (SUMLEV = 040) has a 2-digit FIPS ID; Delaware's is 10.
# * Each county (SUMLEV = 050) within a state has a 3-digit FIPS ID, appended to the 2-digit state ID. New Castle County, Delaware, has FIPS ID 10003.
# * Each Census Tract (SUMLEV = 140) within a county has a 6-digit ID, appended to the county code. The Tract in New Castle County DE that contains most of the UD campus has FIPS ID 10003014502.
# * Each Block Group (SUMLEV = 150) within a Tract has a single digit ID appended to the Tract ID. The center of campus in the northwest corner of the tract is Block Group 100030145022.
# * Each Block (SUMLEV = 750) within a Block Group is identified by three more digits appended to the Block Group ID. Pearson Hall is located in Block 100030145022009.
# <codecell>
# take a look at the current structure of counties_df
counties_df.head()
# <markdowncell>
# If you add all the counties on a state-by-state basis, do you get the same populations for each state?
#
# * use `set_index` to make the FIPS code for the state the index for `states_df`
# * calculate the FIPS code for the counties and make the county FIPS code the index of `counties_df`
# * use groupby on `counties_df` to compare the populations of states with that in `states_df`
# <codecell>
## FILL IN
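# <markdowncell>
# A minimal sketch of one possible solution (an illustrative assumption),
# following the three steps listed above:
# <codecell>
# index the states by their 2-digit FIPS code
states_df = states_df.set_index('state')
# build the 5-digit county FIPS code and make it the index of counties_df
counties_df['FIPS'] = counties_df.state + counties_df.county
counties_df = counties_df.set_index('FIPS')
# add up the counties within each state and compare with the state populations
state_totals = counties_df.groupby('state')['P0010001'].sum()
(state_totals == states_df.P0010001).all()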
# <headingcell level=1>
# Counties in California
# <markdowncell>
# Let's look at home: California state and Alameda County
# <codecell>
# boolean indexing to pull up California
states_df[states_df.NAME == 'California']
# <codecell>
# use .ix -- most general indexing
# http://pandas.pydata.org/pandas-docs/dev/indexing.html#different-choices-for-indexing-loc-iloc-and-ix
states_df.ix['06']
# <codecell>
# California counties
counties_df[counties_df.state=='06']
# <codecell>
counties_df[counties_df.NAME == 'Alameda County']
# <codecell>
counties_df[counties_df.NAME == 'Alameda County']['P0010001']
# <markdowncell>
# Different ways to read off the population of Alameda County -- still looking for the best way
# <codecell>
counties_df[counties_df.NAME == 'Alameda County']['P0010001'].to_dict().values()[0]
# <codecell>
list(counties_df[counties_df.NAME == 'Alameda County']['P0010001'].iteritems())[0][1]
# <codecell>
int(counties_df[counties_df.NAME == 'Alameda County']['P0010001'].values)
# <markdowncell>
# If you know the FIPS code for Alameda County, just read off the population using `.ix`
# <codecell>
# this is like accessing a cell in a spreadsheet -- row, col
ALAMEDA_COUNTY_FIPS = '06001'
counties_df.ix[ALAMEDA_COUNTY_FIPS,'P0010001']
# <headingcell level=1>
# Reading off all the tracts in Alameda County
# <codecell>
counties_df.ix[ALAMEDA_COUNTY_FIPS,'county']
# <codecell>
## FILL IN
## generate a DataFrame named alameda_county_tracts_df by
## calling the census api and the state-county-tract technique
## how many census tracts in Alameda County?
## if you add up the population, what do you get?
## generate the FIPS code for each tract
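# <markdowncell>
# A minimal sketch of one possible solution (an illustrative assumption; it
# uses the `state_county_tract` convenience method of the `census` package,
# with '06' = California and '001' = Alameda County):
# <codecell>
r = c.sf1.state_county_tract('NAME,P0010001', '06', '001', census.ALL)
alameda_county_tracts_df = DataFrame(r)
alameda_county_tracts_df['P0010001'] = alameda_county_tracts_df['P0010001'].astype('int')
alameda_county_tracts_df['FIPS'] = (alameda_county_tracts_df.state +
                                    alameda_county_tracts_df.county +
                                    alameda_county_tracts_df.tract)
len(alameda_county_tracts_df), alameda_county_tracts_df.P0010001.sum()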
# <codecell>
# confirm that you can find the census tract in which Cafe Milano is located
# Cafe Milano is in tract 4228
MILANO_TRACT_ID = '422800'
alameda_county_tracts_df[alameda_county_tracts_df.tract==MILANO_TRACT_ID]
# <headingcell level=1>
# Using Generators to yield all the tracts in the country
# <markdowncell>
# http://www.jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/
# <codecell>
## FILL IN
## try to reproduce the generator I show in class for all the census tracts
## start to think about how to do this for other geographical entities
import time
import us
from itertools import islice
def census_tracts(variable=('NAME','P0010001'), sleep_time=1.0):
for state in us.states.STATES:
print state
for tract in c.sf1.get(variable,
geo={'for':"tract:*",
'in':'state:{state_fips}'.format(state_fips=state.fips)
}):
yield tract
# don't hit the API more than once a second
time.sleep(sleep_time)
# limit the number of tracts we crawl for until we're ready to get all of them
tracts_df = DataFrame(list(islice(census_tracts(), 100)))
tracts_df['P0010001'] = tracts_df['P0010001'].astype('int')
# <codecell>
tracts_df.head()
# <codecell>
## EXERCISE for next time
## write a generator all census places
# <headingcell level=1>
# Compare with Tabulations
# <markdowncell>
# We can compare the total number of tracts we calculate to:
#
# https://www.census.gov/geo/maps-data/data/tallies/tractblock.html
#
# and
#
# https://www.census.gov/geo/maps-data/data/docs/geo_tallies/Tract_Block2010.txt
|
apache-2.0
|
rbnvrw/semtracking
|
semtracking/report.py
|
1
|
3303
|
import numpy
from pandas import DataFrame
import pandas as pd
from os import path, makedirs, sep
import os
import re
def prepare_dataframe(data_frame, microns_per_pixel):
# drop unneeded columns
allowed_cols = ['r', 'dev', 'x', 'y']
cols = [c for c in data_frame.columns if c in allowed_cols]
data_frame = data_frame[cols]
# convert to microns
data_frame *= microns_per_pixel
return data_frame
def setup_dir_is_not_exists(filename):
"""
:param filename:
:return:
"""
directory = path.abspath(path.normpath(path.dirname(filename) + sep + 'report'))
if not path.exists(directory):
makedirs(directory)
return directory
def save_circles_to_csv_grouped(data_frame, filename, microns_per_pixel, suffix='_grouped'):
"""
:param data_frame:
:param filename:
:param microns_per_pixel:
"""
if data_frame.empty:
return
directory = setup_dir_is_not_exists(filename)
filename = path.basename(filename)
data_frame = prepare_dataframe(data_frame, microns_per_pixel)
# Paths
file_path_grouped = path.abspath(path.normpath(directory + sep + re.sub("_\d+$", "", filename)))
report_file_grouped = file_path_grouped + suffix + '_report.csv'
summary_file_grouped = file_path_grouped + suffix + '_summary.csv'
# Merge existing
if path.isfile(report_file_grouped):
existing_df = DataFrame.from_csv(report_file_grouped)
data_frame = pd.concat([data_frame, existing_df], ignore_index=True)
# Delete summary
if path.isfile(summary_file_grouped):
os.remove(summary_file_grouped)
# save DataFrame
data_frame.to_csv(report_file_grouped, encoding='utf-8')
# create summary
summary = generate_summary(data_frame)
# save
summary.to_csv(summary_file_grouped, encoding='utf-8')
def save_circles_to_csv(data_frame, filename, microns_per_pixel):
"""
Save fitted circles to csv files in subdir
:param data_frame:
:param filename:
:param microns_per_pixel:
"""
if data_frame.empty:
return
directory = setup_dir_is_not_exists(filename)
filename = path.basename(filename)
data_frame = prepare_dataframe(data_frame, microns_per_pixel)
# File paths
file_path = path.abspath(path.normpath(directory + sep + filename))
report_file = file_path + '_frame.csv'
summary_file = file_path + '_summary.csv'
# save DataFrame
data_frame.to_csv(report_file, encoding='utf-8')
# create summary
summary = generate_summary(data_frame)
# save
summary.to_csv(summary_file, encoding='utf-8')
def generate_summary(data_frame):
"""
:param data_frame:
:return:
"""
number = len(data_frame.index)
mean_r = numpy.mean(data_frame['r'])
error_r = numpy.sqrt(numpy.dot(data_frame['dev'], data_frame['dev'])) / float(number)
std_r = numpy.std(data_frame['r'])
data = {
'R, mean (um)': [mean_r],
'R, error (um)': [error_r],
'R, std (um)': [std_r],
'D, mean (um)': [2.0 * mean_r],
'D, error (um)': [2.0 * error_r],
'D, std (um)': [2.0 * std_r],
'D, std (fraction)': [std_r / mean_r],
'# particles': [number]
}
summary = DataFrame(data)
return summary
|
mit
|
nmayorov/scikit-learn
|
sklearn/gaussian_process/tests/test_gpc.py
|
28
|
6061
|
"""Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater, assert_equal,
assert_almost_equal, assert_array_equal)
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2))
* RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
def test_predict_consistent():
""" Check binary predict decision has also predicted probability above 0.5.
"""
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel: continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel: continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4)
| (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0])
| (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])))
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(9):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_custom_optimizer():
""" Test that GPC can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 1000 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(1000):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel: continue
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_multi_class():
""" Test GPC for multi-class classification problems. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
def test_multi_class_n_jobs():
""" Test that multi-class GPC produces identical results with n_jobs>1. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
|
bsd-3-clause
|
rgommers/statsmodels
|
statsmodels/datasets/randhie/data.py
|
25
|
2667
|
"""RAND Health Insurance Experiment Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is in the public domain."""
TITLE = __doc__
SOURCE = """
The data was collected by the RAND corporation as part of the Health
Insurance Experiment (HIE).
http://www.rand.org/health/projects/hie.html
This data was used in::
    Cameron, A.C. and Trivedi, P.K. 2005. `Microeconometrics: Methods
and Applications,` Cambridge: New York.
And was obtained from: <http://cameron.econ.ucdavis.edu/mmabook/mmadata.html>
See randhie/src for the original data and description. The data included
here contains only a subset of the original data. The data varies slightly
compared to that reported in Cameron and Trivedi.
"""
DESCRSHORT = """The RAND Co. Health Insurance Experiment Data"""
DESCRLONG = """"""
NOTE = """::
Number of observations - 20,190
Number of variables - 10
Variable name definitions::
mdvis - Number of outpatient visits to an MD
        lncoins - ln(coinsurance + 1), 0 <= coinsurance <= 100
idp - 1 if individual deductible plan, 0 otherwise
lpi - ln(max(1, annual participation incentive payment))
fmde - 0 if idp = 1; ln(max(1, MDE/(0.01 coinsurance))) otherwise
physlm - 1 if the person has a physical limitation
disea - number of chronic diseases
hlthg - 1 if self-rated health is good
hlthf - 1 if self-rated health is fair
hlthp - 1 if self-rated health is poor
(Omitted category is excellent self-rated health)
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
PATH = '%s/%s' % (dirname(abspath(__file__)), 'randhie.csv')
def load():
"""
    Loads the RAND HIE data and returns a Dataset class.
    endog - response variable, mdvis
    exog - design
    Returns
    -------
    Dataset instance:
        a class of the data with array attributes 'endog' and 'exog'
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
    Loads the RAND HIE data and returns a Dataset class.
    endog - response variable, mdvis
    exog - design
    Returns
    -------
    Dataset instance:
        a class of the data with array attributes 'endog' and 'exog'
"""
from pandas import read_csv
data = read_csv(PATH)
return du.process_recarray_pandas(data, endog_idx=0)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(PATH, "rb"), delimiter=",", names=True, dtype=float)
return data
|
bsd-3-clause
|
meren/anvio
|
anvio/db.py
|
1
|
36410
|
# -*- coding: utf-8
# pylint: disable=line-too-long
"""
Low-level db operations.
"""
import os
import time
import math
import numpy
import pandas as pd
import sqlite3
import warnings
import anvio
import anvio.tables as tables
import anvio.terminal as terminal
import anvio.filesnpaths as filesnpaths
from anvio.errors import ConfigError
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "[email protected]"
__status__ = "Development"
# Converts numpy numbers into storable python types that sqlite3 is expecting
sqlite3.register_adapter(numpy.int64, int)
sqlite3.register_adapter(numpy.float64, float)
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_list_in_chunks(input_list, num_items_in_each_chunk=5000):
"""Yield smaller bits of a list"""
for index in range(0, len(input_list), num_items_in_each_chunk):
yield input_list[index:index + num_items_in_each_chunk]
class DB:
def __init__(self, db_path, client_version, new_database=False, ignore_version=False, skip_rowid_prepend=False, run=terminal.Run(), progress=terminal.Progress()):
self.db_path = db_path
self.version = None
self.run = run
self.progress = progress
        # these anonymous functions report whether the ROWID will be added
        # to rows read from the database or not. if the first column of a given
        # table does not contain unique values, anvi'o prepends the ROWID of each
        # row at index 0, unless `skip_rowid_prepend` is True
self.ROWID_PREPENDS_ROW_DATA = lambda table_name: False if skip_rowid_prepend else tables.requires_unique_entry_id[table_name]
self.PROPER_SELECT_STATEMENT = lambda table_name: 'ROWID as "entry_id", *' if self.ROWID_PREPENDS_ROW_DATA(table_name) else '*'
if new_database:
filesnpaths.is_output_file_writable(db_path)
else:
filesnpaths.is_file_exists(db_path)
if new_database and os.path.exists(self.db_path):
os.remove(self.db_path)
self.check_if_db_writable()
try:
self.conn = sqlite3.connect(self.db_path)
except Exception as e:
raise ConfigError(f"This one time someone was not happy with '{self.db_path}' and '{e}', they said.")
self.conn.text_factory = str
self.cursor = self.conn.cursor()
self.table_names_in_db = self.get_table_names()
if new_database:
self.create_self()
self.set_version(client_version)
else:
self.version = self.get_version()
if str(self.version) != str(client_version) and not ignore_version:
if int(self.version) > int(client_version):
progress.reset()
raise ConfigError("Bad news of the day: the database at %s was generated with an anvi'o version that is 'newer' than "
"the one you are actively using right now. We know, you hate to hear this, but you need to upgrade "
"your anvi'o :(" % self.db_path)
else:
progress.reset()
raise ConfigError(f"The database at '{self.db_path}' is outdated (this database is v{self.version} and your anvi'o installation "
f"wants to work with v{client_version}). You can migrate your database without losing any data using the "
f"program `anvi-migrate` with either of the flags `--migrate-dbs-safely` or `--migrate-dbs-quickly`.")
bad_tables = [table_name for table_name in self.table_names_in_db if table_name not in tables.requires_unique_entry_id]
if len(bad_tables):
raise ConfigError("You better be a programmer tinkering with anvi'o databases adding new tables or something. Otherwise we "
"have quite a serious problem :/ Each table in a given anvi'o database must have an entry in the "
"anvio/tables/__init__.py dictionary `requires_unique_entry_id` to explicitly define whether anvi'o "
"should add a unique entry id for its contents upon retrieval as a dictionary. The following tables "
"in this database do not satisfy that: '%s'. You can solve this problem by adding an entry into that "
"dictionary." % (', '.join(bad_tables)))
def get_version(self):
try:
return self.get_meta_value('version')
except:
raise ConfigError("%s does not seem to be a database generated by anvi'o :/" % self.db_path)
def check_if_db_writable(self):
check_counter = 0
check_interval = 1 # in seconds
check_limit = 300 # 5 minutes, in seconds
journal_path = self.db_path + '-journal'
while(check_counter < check_limit and filesnpaths.is_file_exists(journal_path, dont_raise=True)):
if check_counter == 0:
# print only once
self.run.info_single("It seems the database at '%s' currently used by another proccess "
"for writing operations. Anvi'o refuses to work with this database to avoid corrupting it. "
"If you think this is a mistake, you may stop this process and delete the lock file at '%s' after making sure "
"no other active process using it for writing. In case this program is ran by automatic workflow manager like snakemake "
"Anvi'o will periodically check if the journal file still exists for total of %d minutes. If database is still not writable "
"after that time, Anvi'o will stop running. " % (os.path.abspath(self.db_path), os.path.abspath(journal_path), int(check_limit/60)))
time.sleep(check_interval)
check_counter += check_interval
if not check_counter < check_limit:
raise ConfigError("Database is not writable.")
def create_self(self):
self._exec('''CREATE TABLE self (key text, value text)''')
def drop_table(self, table_name):
"""Delete a table in the database if it exists"""
self._exec('''DROP TABLE IF EXISTS %s;''' % table_name)
def create_table(self, table_name, fields, types):
if len(fields) != len(types):
raise ConfigError("create_table: The number of fields and types has to match.")
db_fields = ', '.join(['%s %s' % (t[0], t[1]) for t in zip(fields, types)])
self._exec('''CREATE TABLE %s (%s)''' % (table_name, db_fields))
self.commit()
self.table_names_in_db = self.get_table_names()
def set_version(self, version):
self.set_meta_value('version', version)
self.commit()
def set_meta_value(self, key, value):
self.remove_meta_key_value_pair(key)
self._exec('''INSERT INTO self VALUES(?,?)''', (key, value,))
self.commit()
def remove_meta_key_value_pair(self, key):
self._exec('''DELETE FROM self WHERE key="%s"''' % key)
self.commit()
def update_meta_value(self, key, value):
self.remove_meta_key_value_pair(key)
self.set_meta_value(key, value)
def copy_paste(self, table_name, source_db_path, append=False):
"""Copy `table_name` data from another database (`source_db_path`) into yourself
Arguments
=========
append : bool, False
            If True, the incoming rows are appended to the existing table in this DB rather than replacing it.
"""
source_db = DB(source_db_path, None, ignore_version=True)
num_entries_in_source = source_db.get_row_counts_from_table(table_name)
if not num_entries_in_source:
return
# we are done with the source DB python object. The rest we do in SQL
# for huge performance gains
source_db.disconnect()
if not append:
self._exec('''DELETE FROM %s''' % table_name)
self._exec('''ATTACH "%s" AS source_db''' % source_db_path)
self._exec('''INSERT INTO main.%s SELECT * FROM source_db.%s''' % (table_name, table_name))
self._exec('''DETACH DATABASE "source_db"''')
def get_max_value_in_column(self, table_name, column_name, value_if_empty=None, return_min_instead=False):
"""Get the maximum OR minimum column value in a table
Parameters
==========
value_if_empty : object, None
If not None and table has no entries, value returned is value_if_empty.
"""
response = self._exec("""SELECT %s(%s) FROM %s""" % ('MIN' if return_min_instead else 'MAX', column_name, table_name))
rows = response.fetchall()
val = rows[0][0]
if isinstance(val, type(None)):
return value_if_empty
try:
val = int(val)
except ValueError:
pass
return val
def get_meta_value(self, key, try_as_type_int=True, return_none_if_not_in_table=False):
"""if try_as_type_int, value is attempted to be converted to integer. If it fails, no harm no foul."""
response = self._exec("""SELECT value FROM self WHERE key='%s'""" % key)
rows = response.fetchall()
if not rows and return_none_if_not_in_table:
return None
if not rows:
raise ConfigError("A value for '%s' does not seem to be set in table 'self'." % key)
val = rows[0][0]
if isinstance(val, type(None)):
return None
if try_as_type_int:
try:
val = int(val)
except ValueError:
pass
return val
def commit(self):
self.conn.commit()
def disconnect(self):
self.conn.commit()
self.conn.close()
def _exec(self, sql_query, value=None):
if value:
ret_val = self.cursor.execute(sql_query, value)
else:
ret_val = self.cursor.execute(sql_query)
self.commit()
return ret_val
def _exec_many(self, sql_query, values):
chunk_counter = 0
for chunk in get_list_in_chunks(values):
if anvio.DEBUG:
self.progress.reset()
self.run.info_single("Adding the chunk %d with %d entries of %d total is being added to the db with "
"the SQL command '%s'." \
% (chunk_counter, len(chunk), len(values), sql_query), nl_before=1)
self.cursor.executemany(sql_query, chunk)
chunk_counter += 1
return True
def insert(self, table_name, values=()):
query = '''INSERT INTO %s VALUES (%s)''' % (table_name, ','.join(['?'] * len(values)))
return self._exec(query, values)
def insert_many(self, table_name, entries=None):
if len(entries):
query = '''INSERT INTO %s VALUES (%s)''' % (table_name, ','.join(['?'] * len(entries[0])))
return self._exec_many(query, entries)
def insert_rows_from_dataframe(self, table_name, dataframe, raise_if_no_columns=True):
"""Insert rows from a dataframe
Parameters
==========
raise_if_no_columns : bool, True
            If False and the dataframe has no columns (e.g. dataframe = pd.DataFrame({})), this function
            returns quietly without raising an error.
Notes
=====
- This should one day be replaced with the following code:
if 'entry_id' in structure:
# This table has an entry_id of, we have to be aware of it
if 'entry_id' in df.columns:
# The user already has an 'entry_id' column. We assume they know what they are doing
next_available_id = df['entry_id'].max() + 1
else:
num_entries = df.shape[0]
next_available_id = self.get_max_value_in_column(name, 'entry_id', value_if_empty=-1) + 1
df['entry_id'] = range(next_available_id, next_available_id + num_entries)
next_available_id += num_entries
else:
next_available_id = None
# subset columns and reorder according to the table structure
df = df[structure]
dtypes = dict(zip(structure, types))
df.to_sql(
name,
self.conn,
if_exists='append',
chunksize=chunksize,
dtype=dtypes,
index=False
)
return next_available_id
"""
self.is_table_exists(table_name)
if not list(dataframe.columns) and not raise_if_no_columns:
            # if the dataframe has no columns, we just return
return
if len(set(dataframe.columns)) != len(list(dataframe.columns)):
raise ConfigError("insert_rows_from_dataframe :: There is at least one duplicate column "
"name in the dataframe. Here is the list of columns: [{}].".\
format(", ".join(list(dataframe.columns))))
if set(dataframe.columns) != set(self.get_table_structure(table_name)):
raise ConfigError("insert_rows_from_dataframe :: The columns in the dataframe "
"do not equal the columns of the requested table. "
"The columns from each are respectively ({}); and ({}).".\
format(", ".join(list(dataframe.columns)),
", ".join(self.get_table_structure(table_name))))
# conform to the column order of the table structure
dataframe = dataframe[self.get_table_structure(table_name)]
entries = [tuple(row) for row in dataframe.values]
self.insert_many(table_name, entries=entries)
def is_table_exists(self, table_name):
if table_name not in self.table_names_in_db:
raise ConfigError(f"The database at {self.db_path} does seem to have a table `{table_name}` :/ "
f"Here is a list of table names this database knows: {', '.join(self.table_names_in_db)}")
def get_all_rows_from_table(self, table_name):
self.is_table_exists(table_name)
response = self._exec('''SELECT %s FROM %s''' % (self.PROPER_SELECT_STATEMENT(table_name), table_name))
return response.fetchall()
def get_some_rows_from_table(self, table_name, where_clause):
self.is_table_exists(table_name)
where_clause = where_clause.replace('"', "'")
response = self._exec('''SELECT %s FROM %s WHERE %s''' % (self.PROPER_SELECT_STATEMENT(table_name), table_name, where_clause))
return response.fetchall()
def get_row_counts_from_table(self, table_name, where_clause=None):
self.is_table_exists(table_name)
if where_clause:
where_clause = where_clause.replace('"', "'")
response = self._exec('''SELECT COUNT(*) FROM %s WHERE %s''' % (table_name, where_clause))
else:
response = self._exec('''SELECT COUNT(*) FROM %s''' % (table_name))
return response.fetchall()[0][0]
def remove_some_rows_from_table(self, table_name, where_clause):
self.is_table_exists(table_name)
where_clause = where_clause.replace('"', "'")
self._exec('''DELETE FROM %s WHERE %s''' % (table_name, where_clause))
self.commit()
def get_single_column_from_table(self, table, column, unique=False, where_clause=None):
self.is_table_exists(table)
if where_clause:
where_clause = where_clause.replace('"', "'")
response = self._exec('''SELECT %s %s FROM %s WHERE %s''' % ('DISTINCT' if unique else '', column, table, where_clause))
else:
response = self._exec('''SELECT %s %s FROM %s''' % ('DISTINCT' if unique else '', column, table))
return [t[0] for t in response.fetchall()]
def get_some_columns_from_table(self, table, comma_separated_column_names, unique=False, where_clause=None):
self.is_table_exists(table)
if where_clause:
where_clause = where_clause.replace('"', "'")
response = self._exec('''SELECT %s %s FROM %s WHERE %s''' % ('DISTINCT' if unique else '', comma_separated_column_names, table, where_clause))
else:
response = self._exec('''SELECT %s %s FROM %s''' % ('DISTINCT' if unique else '', comma_separated_column_names, table))
return response.fetchall()
def get_frequencies_of_values_from_a_column(self, table_name, column_name):
self.is_table_exists(table_name)
response = self._exec('''select %s, COUNT(*) from %s group by %s''' % (column_name, table_name, column_name))
return response.fetchall()
def get_table_column_types(self, table_name):
self.is_table_exists(table_name)
response = self._exec('PRAGMA TABLE_INFO(%s)' % table_name)
return [t[2] for t in response.fetchall()]
def get_table_columns_and_types(self, table_name):
self.is_table_exists(table_name)
response = self._exec('PRAGMA TABLE_INFO(%s)' % table_name)
return dict([(t[1], t[2]) for t in response.fetchall()])
def get_table_structure(self, table_name):
self.is_table_exists(table_name)
response = self._exec('''SELECT * FROM %s''' % table_name)
return [t[0] for t in response.description]
def get_table_as_list_of_tuples(self, table_name, table_structure=None):
return self.get_all_rows_from_table(table_name)
def smart_get(self, table_name, column=None, data=None, string_the_key=False, error_if_no_data=True, progress=None, omit_parent_column=False):
"""A wrapper function for `get_*_table_as_dict` and that is not actually that smart.
If the user is interested in only some of the data, they can build a where clause
and use `get_some_rows_from_table_as_dict`. If the user is interested in the entire
table data, then they would call `get_table_as_dict`. But in situations where it is
not certain whether there will be a where clause, the if/else statements clutter the
code. Here is an example:
----8<-------8<-------8<-------8<-------8<-------8<-------8<-------8<-------8<-------8<-------8<-------
def func(items_of_interest=None):
(...)
if items_of_interest:
where_clause = 'column_name IN (%s)' % (','.join(['"%s"' % item for item in items_of_interest]))
d = get_some_rows_from_table_as_dict(table_name, where_clause=where_clause)
else:
d = get_table_as_dict(table_name)
(...)
---->8------->8------->8------->8------->8------->8------->8------->8------->8------->8------->8-------
This function cleans up this mess as this call is equivalent to the example code above:
----8<-------8<-------8<-------8<-------8<-------8<-------8<-------8<-------8<-------8<-------8<-------
def func(items_of_interest=None):
(...)
smart_get(table_name, column_name, items_of_interest)
(...)
---->8------->8------->8------->8------->8------->8------->8------->8------->8------->8------->8-------
        Parameters
==========
table_name: str
The anvi'o data table name
column: str
The column name that will be used to select from table
data: set
A set of item names of interest. If the set is empty, the function will return the entire content of `table_name`
"""
table_columns_and_types = self.get_table_columns_and_types(table_name)
if column not in table_columns_and_types:
raise ConfigError(f"The column name `{column}` is not in table `{table_name}` :/")
if column and data:
if table_columns_and_types[column] in ["numeric", "integer"]:
items = ','.join([str(d) for d in data])
else:
items = ','.join(['"%s"' % d for d in data])
if progress:
progress.update(f'Reading **SOME** data from `{table_name.replace("_", " ")}` table :)')
return self.get_some_rows_from_table_as_dict(table_name, where_clause=f"{column} IN ({items})", string_the_key=string_the_key, error_if_no_data=error_if_no_data, omit_parent_column=omit_parent_column)
else:
if progress:
progress.update(f'Reading **ALL** data from `{table_name.replace("_", " ")}` table :(')
return self.get_table_as_dict(table_name, string_the_key=string_the_key, error_if_no_data=error_if_no_data, omit_parent_column=omit_parent_column)
def get_table_as_dict(self, table_name, string_the_key=False, columns_of_interest=None, keys_of_interest=None, omit_parent_column=False, error_if_no_data=True, log_norm_numeric_values=False):
if self.ROWID_PREPENDS_ROW_DATA(table_name):
table_structure = ['entry_id'] + self.get_table_structure(table_name)
else:
table_structure = self.get_table_structure(table_name)
columns_to_return = list(range(0, len(table_structure)))
if columns_of_interest and not isinstance(columns_of_interest, type([])):
raise ConfigError("The parameter `columns_of_interest` must be of type <list>.")
if omit_parent_column:
if '__parent__' in table_structure:
columns_to_return.remove(table_structure.index('__parent__'))
table_structure.remove('__parent__')
if columns_of_interest:
for col in table_structure[1:]:
if col not in columns_of_interest:
columns_to_return.remove(table_structure.index(col))
if len(columns_to_return) == 1:
if error_if_no_data:
raise ConfigError("get_table_as_dict :: after removing an column that was not mentioned in the columns "
"of interest by the client, nothing was left to return...")
else:
return {}
rows = self.get_all_rows_from_table(table_name)
#-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----
#
# SAD TABLES BEGIN
#
# NOTE from the past:
# FIXME FIXME FIXME FIXME FIXME FIXME FIXME FIXME FIXME FIXME FIXME FIXME
# this is one of the most critical design mistakes that remain in anvi'o. we set `entry_id` values
# in table classes depending on the data that will be entered into the database. this is how it goes:
# anvi'o learns the highest entry id in a table (to which it is about to enter some data), for each db
# entry assigns a new `entry_id`, enters the data. it is all good when there is a single process doing it.
# but when there are multiple processes running in parallel, sometimes race conditions occur: two processes
# learn the max entry id about the same time, and when they finally enter the data to the db, some entries
# end up not being unique. this is a toughie because sometimes entry ids are used to connect distinct
# information from different tables, so they must be known before the data goes into the database, etc.
# when these race conditions occur, anvi'o gives an error telling the user kindly that they are fucked. but in
# some cases it is possible to recover from that (THE CODE BELOW TRIES TO DO THAT) by reassigning all ids on the
        # fly to the resulting data dictionary (i.e., not paying attention to entry ids in the database and basically using
        # new ones to avoid key information being overwritten due to the lack of unique entry ids which become keys for
        # the data dictionary). in other cases there are no ways to fix it, such as for HMM tables. The ACTUAL SOLUTION to
        # this is to remove `entry_id` columns from every table in anvi'o, and to use SQLite indexes as entry ids.
#
# NOTE from the future
# Every SQLite table has an implicit column called ROWID. Does this solve our problem?
#
# NOTE from a more recent future: we no longer have the entry_id problem for most tables .. except
# the hmm_hits table. the reason it has to be there is because we need to know the precise entry ids for
# hmm hits to be able to track them in splits. there probably are better ways to do that. So here I am leaving
# a FIXME. once this is resolved, the entry_id routines in Table base class can be deleted safely. until then,
# we will suffer from race conditions occasionally, and this embarrassment will stay here in the code..
if table_name == tables.hmm_hits_table_name:
unique_keys = set([r[0] for r in rows])
if len(unique_keys) != len(rows):
if anvio.FIX_SAD_TABLES:
if 'hmm' in table_name:
raise ConfigError("You asked anvi'o to fix sad tables, but the sad table you're trying to fix happens to "
"be related to HMM operations in anvi'o, where supposedly unique entries tie together "
"multiple tables. Long story short, solving this while ensuring everything is done right "
"is quite difficult and there is no reason to take any risks. The best you can do is to "
"remove all HMMs from your contigs database, and re-run them with a single instance of "
"`anvi-run-hmms` command (you can use multiple threads, but you shouldn't send multiple "
"`anvi-run-hmms` to your cluster to be run on the same contigs database in parallel -- "
"that's what led you to this point at the first place). Apologies for this bioinformatics "
"poo poo :( It is all on us.")
self.run.info_single("You have sad tables. You have used `--fix-sad-tables` flag. Now anvi'o will try to fix them...", mc="red")
# here we will update the rows data with a small memory fingerprint:
entry_id_counter = 0
for i in range(0, len(rows)):
row = rows[i]
rows[i] = [entry_id_counter] + list(row[1:])
entry_id_counter += 1
# now we will remove the previous table, and enter the new data with up-to-date entry ids
table_structure = self.get_table_structure(table_name)
# delete the table content *gulp*
self._exec('''DELETE FROM %s''' % table_name)
# enter corrected data
self._exec_many('''INSERT INTO %s VALUES (%s)''' % (table_name, ','.join(['?'] * len(table_structure))), rows)
self.run.info_single("If you are seeing this line, it means anvi'o managed to fix those sad tables. No more sad! "
"But please make double sure that nothing looks funny in your results. If you start getting "
"errors and you wish to contact us for that, please don't forget to mention that you did try "
"to fix your sad tables.", mc="green")
else:
raise ConfigError("This is one of the core functions of anvi'o you never want to hear from, but there seems "
"to be something wrong with the table '%s' that you are trying to read from. While there "
"are %d items in this table, there are only %d unique keys, which means some of them are "
"going to be overwritten when this function creates a final dictionary of data to return. "
"This often happens when the user runs multiple processes in parallel that tries to write "
"to the same table. For instance, running a separate instance of `anvi-run-hmms` on the same "
"contigs database with different HMM profiles. Anvi'o is very sad for not handling this "
"properly, but such database tables need fixin' before things can continue :( If you would "
"like anvi'o to try to fix this, please run the same command you just run with the flag "
"`--fix-sad-tables`. If you do that it is a great idea to backup your original database "
"and then very carefully check the results to make sure things do not look funny." \
% (table_name, len(rows), len(unique_keys)))
#
# SAD TABLES END
#
#----->8----->8----->8----->8----->8----->8----->8----->8----->8----->8----->8----->8----->8----->8----->8-----
results_dict = {}
if keys_of_interest:
keys_of_interest = set(keys_of_interest)
for row in rows:
entry = {}
if keys_of_interest:
if row[0] in keys_of_interest:
                    # so we are interested in keeping this; reduce the size of the
                    # hash to speed up the next inquiry, and keep going.
keys_of_interest.remove(row[0])
else:
                    # we are not interested in this one, continue:
continue
for i in columns_to_return[1:]:
value = row[i]
if log_norm_numeric_values:
if type(value) == float or type(value) == int:
entry[table_structure[i]] = math.log10(value + 1)
else:
entry[table_structure[i]] = value
if string_the_key:
results_dict[str(row[0])] = entry
else:
results_dict[row[0]] = entry
return results_dict
def get_table_as_dataframe(self, table_name, where_clause=None, columns_of_interest=None, drop_if_null=False, error_if_no_data=True):
"""Get the table as a pandas DataFrame object
Parameters
==========
table_name : str
where_clause : str, None
SQL WHERE clause. If None, everything is fetched.
columns_of_interest : list, None
Which columns do you want to return? If None, all are returned. Applied after where_clause.
drop_if_null : bool, False
Drop columns if they contain all NULL values, i.e. np.nan, or ''
error_if_no_data : bool, True
Raise an error if the dataframe has 0 rows. Checked after where_clause.
"""
if self.ROWID_PREPENDS_ROW_DATA(table_name):
table_structure = ['entry_id'] + self.get_table_structure(table_name)
else:
table_structure = self.get_table_structure(table_name)
if columns_of_interest:
columns_of_interest = list(columns_of_interest)
else:
columns_of_interest = table_structure
if where_clause:
where_clause = where_clause.replace('"', "'")
results_df = pd.read_sql('''SELECT %s FROM "%s" WHERE %s''' % (self.PROPER_SELECT_STATEMENT(table_name), table_name, where_clause), self.conn, columns=table_structure)
else:
results_df = pd.read_sql('''SELECT %s FROM "%s"''' % (self.PROPER_SELECT_STATEMENT(table_name), table_name), self.conn, columns=table_structure)
if results_df.empty and error_if_no_data:
raise ConfigError("DB.get_table_as_dataframe :: The dataframe requested is empty")
if drop_if_null:
for col in columns_of_interest.copy():
if results_df[col].isna().all():
# Column contains only entries that equate to pandas NA
columns_of_interest.remove(col)
elif (results_df[col] == '').all():
# Column contains all empty strings
columns_of_interest.remove(col)
return results_df[columns_of_interest]
def get_some_rows_from_table_as_dict(self, table_name, where_clause, error_if_no_data=True, string_the_key=False, row_num_as_key=False, omit_parent_column=False):
"""This is similar to get_table_as_dict, but much less general.
get_table_as_dict can do a lot, but it first reads all data into the memory to operate on it.
        In some cases the programmer may like to access only a small fraction of the entries in a table
by using `WHERE column = value` notation, which is not possible with the more generalized
function.
Parameters
==========
table_name: str
which table to get rows from
where_clause: str
SQL-style where clause for row selection
error_if_no_data: bool
if true, this function will raise an error if no data is selected from the table. otherwise, it will
quietly return the empty dictionary
string_the_key: bool
            if true, the dictionary key (the row number or the first-column value) is converted to a string
row_num_as_key: bool
added as parameter so this function works for KEGG MODULES.db, which does not have unique IDs in the
first column. If True, the returned dictionary will be keyed by integers from 0 to (# rows returned - 1)
omit_parent_column: bool
removes __parent__ column from the data to be returned if __parent__ exists in table structure.
Returns
=======
results_dict: dictionary
contains the requested rows from the table
"""
results_dict = {}
where_clause = where_clause.replace('"', "'")
if self.ROWID_PREPENDS_ROW_DATA(table_name):
table_structure = ['entry_id'] + self.get_table_structure(table_name)
else:
table_structure = self.get_table_structure(table_name)
if omit_parent_column and '__parent__' in table_structure:
table_structure.remove('__parent__')
columns_to_return = list(range(0, len(table_structure)))
rows = self.get_some_rows_from_table(table_name, where_clause)
row_num = 0
for row in rows:
entry = {}
if row_num_as_key:
entry[table_structure[0]] = row[0]
for i in columns_to_return[1:]:
entry[table_structure[i]] = row[i]
if string_the_key:
results_dict[str(row_num)] = entry
else:
results_dict[row_num] = entry
else:
for i in columns_to_return[1:]:
entry[table_structure[i]] = row[i]
if string_the_key:
results_dict[str(row[0])] = entry
else:
results_dict[row[0]] = entry
row_num += 1
if error_if_no_data and not len(results_dict):
raise ConfigError("Query on %s with the where clause of '%s' did not return anything." % (table_name, where_clause))
return results_dict
def get_table_names(self):
response = self._exec("""select name from sqlite_master where type='table'""")
return [r[0] for r in response.fetchall()]
|
gpl-3.0
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/parallel/tests/test_magics.py
|
3
|
12999
|
# -*- coding: utf-8 -*-
"""Test Parallel magics
Authors:
* Min RK
"""
#-------------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import re
import sys
import time
import zmq
from nose import SkipTest
from IPython.testing import decorators as dec
from IPython.testing.ipunittest import ParametricTestCase
from IPython.utils.io import capture_output
from IPython import parallel as pmod
from IPython.parallel import error
from IPython.parallel import AsyncResult
from IPython.parallel.util import interactive
from IPython.parallel.tests import add_engines
from .clienttest import ClusterTestCase, generate_output
def setup():
add_engines(3, total=True)
class TestParallelMagics(ClusterTestCase, ParametricTestCase):
def test_px_blocking(self):
ip = get_ipython()
v = self.client[-1:]
v.activate()
v.block=True
ip.magic('px a=5')
self.assertEquals(v['a'], [5])
ip.magic('px a=10')
self.assertEquals(v['a'], [10])
# just 'print a' works ~99% of the time, but this ensures that
# the stdout message has arrived when the result is finished:
with capture_output() as io:
ip.magic(
'px import sys,time;print(a);sys.stdout.flush();time.sleep(0.2)'
)
out = io.stdout
self.assertTrue('[stdout:' in out, out)
self.assertFalse('\n\n' in out)
self.assertTrue(out.rstrip().endswith('10'))
self.assertRaisesRemote(ZeroDivisionError, ip.magic, 'px 1/0')
def _check_generated_stderr(self, stderr, n):
expected = [
r'\[stderr:\d+\]',
'^stderr$',
'^stderr2$',
] * n
self.assertFalse('\n\n' in stderr, stderr)
lines = stderr.splitlines()
self.assertEquals(len(lines), len(expected), stderr)
for line,expect in zip(lines, expected):
if isinstance(expect, str):
expect = [expect]
for ex in expect:
self.assertTrue(re.search(ex, line) is not None, "Expected %r in %r" % (ex, line))
def test_cellpx_block_args(self):
"""%%px --[no]block flags work"""
ip = get_ipython()
v = self.client[-1:]
v.activate()
v.block=False
for block in (True, False):
v.block = block
ip.magic("pxconfig --verbose")
with capture_output() as io:
ip.run_cell_magic("px", "", "1")
if block:
self.assertTrue(io.stdout.startswith("Parallel"), io.stdout)
else:
self.assertTrue(io.stdout.startswith("Async"), io.stdout)
with capture_output() as io:
ip.run_cell_magic("px", "--block", "1")
self.assertTrue(io.stdout.startswith("Parallel"), io.stdout)
with capture_output() as io:
ip.run_cell_magic("px", "--noblock", "1")
self.assertTrue(io.stdout.startswith("Async"), io.stdout)
def test_cellpx_groupby_engine(self):
"""%%px --group-outputs=engine"""
ip = get_ipython()
v = self.client[:]
v.block = True
v.activate()
v['generate_output'] = generate_output
with capture_output() as io:
ip.run_cell_magic('px', '--group-outputs=engine', 'generate_output()')
self.assertFalse('\n\n' in io.stdout)
lines = io.stdout.splitlines()
expected = [
r'\[stdout:\d+\]',
'stdout',
'stdout2',
r'\[output:\d+\]',
r'IPython\.core\.display\.HTML',
r'IPython\.core\.display\.Math',
r'Out\[\d+:\d+\]:.*IPython\.core\.display\.Math',
] * len(v)
self.assertEquals(len(lines), len(expected), io.stdout)
for line,expect in zip(lines, expected):
if isinstance(expect, str):
expect = [expect]
for ex in expect:
self.assertTrue(re.search(ex, line) is not None, "Expected %r in %r" % (ex, line))
self._check_generated_stderr(io.stderr, len(v))
def test_cellpx_groupby_order(self):
"""%%px --group-outputs=order"""
ip = get_ipython()
v = self.client[:]
v.block = True
v.activate()
v['generate_output'] = generate_output
with capture_output() as io:
ip.run_cell_magic('px', '--group-outputs=order', 'generate_output()')
self.assertFalse('\n\n' in io.stdout)
lines = io.stdout.splitlines()
expected = []
expected.extend([
r'\[stdout:\d+\]',
'stdout',
'stdout2',
] * len(v))
expected.extend([
r'\[output:\d+\]',
'IPython.core.display.HTML',
] * len(v))
expected.extend([
r'\[output:\d+\]',
'IPython.core.display.Math',
] * len(v))
expected.extend([
r'Out\[\d+:\d+\]:.*IPython\.core\.display\.Math'
] * len(v))
self.assertEquals(len(lines), len(expected), io.stdout)
for line,expect in zip(lines, expected):
if isinstance(expect, str):
expect = [expect]
for ex in expect:
self.assertTrue(re.search(ex, line) is not None, "Expected %r in %r" % (ex, line))
self._check_generated_stderr(io.stderr, len(v))
def test_cellpx_groupby_type(self):
"""%%px --group-outputs=type"""
ip = get_ipython()
v = self.client[:]
v.block = True
v.activate()
v['generate_output'] = generate_output
with capture_output() as io:
ip.run_cell_magic('px', '--group-outputs=type', 'generate_output()')
self.assertFalse('\n\n' in io.stdout)
lines = io.stdout.splitlines()
expected = []
expected.extend([
r'\[stdout:\d+\]',
'stdout',
'stdout2',
] * len(v))
expected.extend([
r'\[output:\d+\]',
r'IPython\.core\.display\.HTML',
r'IPython\.core\.display\.Math',
] * len(v))
expected.extend([
(r'Out\[\d+:\d+\]', r'IPython\.core\.display\.Math')
] * len(v))
self.assertEquals(len(lines), len(expected), io.stdout)
for line,expect in zip(lines, expected):
if isinstance(expect, str):
expect = [expect]
for ex in expect:
self.assertTrue(re.search(ex, line) is not None, "Expected %r in %r" % (ex, line))
self._check_generated_stderr(io.stderr, len(v))
def test_px_nonblocking(self):
ip = get_ipython()
v = self.client[-1:]
v.activate()
v.block=False
ip.magic('px a=5')
self.assertEquals(v['a'], [5])
ip.magic('px a=10')
self.assertEquals(v['a'], [10])
ip.magic('pxconfig --verbose')
with capture_output() as io:
ar = ip.magic('px print (a)')
self.assertTrue(isinstance(ar, AsyncResult))
self.assertTrue('Async' in io.stdout)
self.assertFalse('[stdout:' in io.stdout)
self.assertFalse('\n\n' in io.stdout)
ar = ip.magic('px 1/0')
self.assertRaisesRemote(ZeroDivisionError, ar.get)
def test_autopx_blocking(self):
ip = get_ipython()
v = self.client[-1]
v.activate()
v.block=True
with capture_output() as io:
ip.magic('autopx')
ip.run_cell('\n'.join(('a=5','b=12345','c=0')))
ip.run_cell('b*=2')
ip.run_cell('print (b)')
ip.run_cell('b')
ip.run_cell("b/c")
ip.magic('autopx')
output = io.stdout
self.assertTrue(output.startswith('%autopx enabled'), output)
self.assertTrue(output.rstrip().endswith('%autopx disabled'), output)
self.assertTrue('ZeroDivisionError' in output, output)
self.assertTrue('\nOut[' in output, output)
self.assertTrue(': 24690' in output, output)
ar = v.get_result(-1)
self.assertEquals(v['a'], 5)
self.assertEquals(v['b'], 24690)
self.assertRaisesRemote(ZeroDivisionError, ar.get)
def test_autopx_nonblocking(self):
ip = get_ipython()
v = self.client[-1]
v.activate()
v.block=False
with capture_output() as io:
ip.magic('autopx')
ip.run_cell('\n'.join(('a=5','b=10','c=0')))
ip.run_cell('print (b)')
ip.run_cell('import time; time.sleep(0.1)')
ip.run_cell("b/c")
ip.run_cell('b*=2')
ip.magic('autopx')
output = io.stdout.rstrip()
self.assertTrue(output.startswith('%autopx enabled'))
self.assertTrue(output.endswith('%autopx disabled'))
self.assertFalse('ZeroDivisionError' in output)
ar = v.get_result(-2)
self.assertRaisesRemote(ZeroDivisionError, ar.get)
# prevent TaskAborted on pulls, due to ZeroDivisionError
time.sleep(0.5)
self.assertEquals(v['a'], 5)
# b*=2 will not fire, due to abort
self.assertEquals(v['b'], 10)
def test_result(self):
ip = get_ipython()
v = self.client[-1]
v.activate()
data = dict(a=111,b=222)
v.push(data, block=True)
for name in ('a', 'b'):
ip.magic('px ' + name)
with capture_output() as io:
ip.magic('pxresult')
output = io.stdout
msg = "expected %s output to include %s, but got: %s" % \
('%pxresult', str(data[name]), output)
self.assertTrue(str(data[name]) in output, msg)
@dec.skipif_not_matplotlib
def test_px_pylab(self):
"""%pylab works on engines"""
ip = get_ipython()
v = self.client[-1]
v.block = True
v.activate()
with capture_output() as io:
ip.magic("px %pylab inline")
self.assertTrue("Welcome to pylab" in io.stdout, io.stdout)
self.assertTrue("backend_inline" in io.stdout, io.stdout)
with capture_output() as io:
ip.magic("px plot(rand(100))")
self.assertTrue('Out[' in io.stdout, io.stdout)
self.assertTrue('matplotlib.lines' in io.stdout, io.stdout)
def test_pxconfig(self):
ip = get_ipython()
rc = self.client
v = rc.activate(-1, '_tst')
self.assertEquals(v.targets, rc.ids[-1])
ip.magic("%pxconfig_tst -t :")
self.assertEquals(v.targets, rc.ids)
ip.magic("%pxconfig_tst -t ::2")
self.assertEquals(v.targets, rc.ids[::2])
ip.magic("%pxconfig_tst -t 1::2")
self.assertEquals(v.targets, rc.ids[1::2])
ip.magic("%pxconfig_tst -t 1")
self.assertEquals(v.targets, 1)
ip.magic("%pxconfig_tst --block")
self.assertEquals(v.block, True)
ip.magic("%pxconfig_tst --noblock")
self.assertEquals(v.block, False)
def test_cellpx_targets(self):
"""%%px --targets doesn't change defaults"""
ip = get_ipython()
rc = self.client
view = rc.activate(rc.ids)
self.assertEquals(view.targets, rc.ids)
ip.magic('pxconfig --verbose')
for cell in ("pass", "1/0"):
with capture_output() as io:
try:
ip.run_cell_magic("px", "--targets all", cell)
except pmod.RemoteError:
pass
self.assertTrue('engine(s): all' in io.stdout)
self.assertEquals(view.targets, rc.ids)
def test_cellpx_block(self):
"""%%px --block doesn't change default"""
ip = get_ipython()
rc = self.client
view = rc.activate(rc.ids)
view.block = False
self.assertEquals(view.targets, rc.ids)
ip.magic('pxconfig --verbose')
for cell in ("pass", "1/0"):
with capture_output() as io:
try:
ip.run_cell_magic("px", "--block", cell)
except pmod.RemoteError:
pass
self.assertFalse('Async' in io.stdout)
self.assertFalse(view.block)
|
lgpl-3.0
|
ZENGXH/scikit-learn
|
examples/linear_model/plot_theilsen.py
|
232
|
3615
|
"""
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of a simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept are then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
thus also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
|
bsd-3-clause
|
xiaoxiamii/scikit-learn
|
sklearn/cross_decomposition/tests/test_pls.py
|
215
|
11427
|
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
    # 2 latent variables:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Check that PLSSVD doesn't return all possible components, but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
    # check that the original data has not been modified
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
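# --- Added illustration (not part of the original scikit-learn test module) ---
# A minimal, self-contained sketch of the orthogonality properties exercised above,
# written against the public sklearn.cross_decomposition API (the `pls_` alias used in
# this module refers to an internal import path that may differ between versions).
def _demo_pls_orthogonality():
    import numpy as np
    from sklearn.cross_decomposition import PLSCanonical
    rng = np.random.RandomState(0)
    X = rng.normal(size=(50, 4))
    Y = X[:, :2] + 0.1 * rng.normal(size=(50, 2))
    pls = PLSCanonical(n_components=2).fit(X, Y)
    T, U = pls.transform(X, Y)  # latent scores for X and Y
    for M in (pls.x_weights_, T, U):
        K = np.dot(M.T, M)
        off_diag = K - np.diag(np.diag(K))
        # columns are mutually orthogonal, so M'M is (numerically) diagonal
        assert np.abs(off_diag).max() < 1e-8 * np.abs(K).max()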
|
bsd-3-clause
|
poldrack/myconnectome
|
myconnectome/rnaseq/get_ImmPort_eigengenes.py
|
2
|
2781
|
"""
get eigengene for each cluster
"""
import os,sys
import numpy
from sklearn.decomposition import PCA
import sklearn.linear_model
stdir=os.environ['MYCONNECTOME_DIR']
rnaseqdir=os.path.join(stdir,'rna-seq')
immportdir=os.path.join(rnaseqdir,'ImmPort')
def get_ImmPort_eigengenes():
if not os.path.exists(immportdir):
os.mkdir(immportdir)
genelists={}
f=open(os.path.join(immportdir,'all_ImmPort_pathways.txt'))
for l in f.readlines():
l_s=l.strip().split('\t')
genelists[l_s[0]]=l_s[1:]
f.close()
varstabfile=os.path.join(rnaseqdir,'varstab_data_prefiltered_rin_3PC_regressed.txt')
pca = PCA(n_components=1)
f=open(varstabfile)
header=f.readline()
exprdata={}
gene_names=[]
for l in f.readlines():
l_s=l.strip().split()
gene_name=l_s[0].replace('"','')
gene_names.append(gene_name)
exprdata[gene_name]=[float(i) for i in l_s[1:]]
setdata={}
setdata_genes={}
for k in genelists.iterkeys():
if not setdata.has_key(k):
setdata[k]=[]
setgenes=genelists[k]
#print k, setgenes
tmp=[]
tmp_genes=[]
for g in setgenes:
try:
tmp.append(exprdata[g])
tmp_genes.append(g)
except:
print g,'missing from data'
pass
if not setdata.has_key(k):
setdata[k]=[]
setdata[k]=numpy.array(tmp)
setdata_genes[k]=tmp_genes
rin=numpy.loadtxt(os.path.join(rnaseqdir,'rin.txt'))
seteig=numpy.zeros((48,len(genelists)))
setexplained=numpy.zeros(len(genelists))
genelistkeys=genelists.keys()
genelistkeys.sort()
for i in range(len(genelists)):
k=genelistkeys[i]
pca.fit(setdata[k].T)
seteig[:,i]=pca.transform(setdata[k].T)[:,0]
print numpy.corrcoef(seteig[:,i],numpy.mean(setdata[k],0))[0,1]
if numpy.corrcoef(seteig[:,i],numpy.mean(setdata[k],0))[0,1] < 0:
#print 'flippping sign of PC to match data'
seteig[:,i]=-1.0*seteig[:,i]
setexplained[i]=pca.explained_variance_ratio_
print k,setdata[k].shape,setexplained[i],numpy.corrcoef(seteig[:,i],rin)[0,1]
f=open(os.path.join(immportdir,'ImmPort_eigengenes_prefilt_rin3PCreg.txt'),'w')
for i in range(len(genelists)):
f.write('%s\t%s\n'%(genelistkeys[i],'\t'.join(['%f'%j for j in seteig[:,i]])))
f.close()
numpy.savetxt(os.path.join(immportdir,'ImmPort_eigengenes_prefilt_rin3PCreg_explainedvariance.txt'),setexplained)
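# --- Added illustration (not part of the original pipeline) ---
# A minimal, standalone sketch of the eigengene computation used above: the eigengene of a
# gene set is the first principal component of its samples-by-genes expression matrix,
# sign-flipped so that it correlates positively with the mean expression profile.
# The toy data below is synthetic; the shapes (20 genes x 48 samples) simply mirror the code above.
def _demo_eigengene():
    import numpy as np
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    expr = rng.normal(size=(20, 48)) + rng.normal(size=(1, 48))  # genes x samples, shared signal
    pca = PCA(n_components=1)
    eigengene = pca.fit_transform(expr.T)[:, 0]  # one value per sample
    if np.corrcoef(eigengene, expr.mean(axis=0))[0, 1] < 0:
        eigengene = -eigengene  # align the sign of the PC with the mean profile
    return eigengene, pca.explained_variance_ratio_[0]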
if __name__ == "__main__":
get_ImmPort_eigengenes()
|
mit
|
Akshay0724/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
36
|
11813
|
"""Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
# Test the interpolating property for different kernels.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_pred, y)
assert_almost_equal(np.diag(y_cov), 0.)
def test_lml_improving():
# Test that hyperparameter-tuning improves log-marginal likelihood.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
# Test that lml of optimized kernel is stored correctly.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
# Test that we are in local maximum after hyperparameter-optimization.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
    # Test that hyperparameter-optimization remains in bounds
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
# Compare analytic and numeric gradient of log marginal likelihood.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
# Test that GP prior has mean 0 and identical variances.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
# Test that statistics of samples drawn from GP are correct.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
# Test that kernel parameters are unmodified when optimizer is None.
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
# Test that predicted std.-dev. is consistent with cov's diagonal.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
# Test that GPR can identify meaningful anisotropic length-scales.
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(np.exp(gpr.kernel_.theta[1]),
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_y_normalization():
# Test normalization of the target values in GP
# Fitting non-normalizing GP on normalized y and fitting normalizing GP
# on unnormalized y should yield identical results
y_mean = y.mean(0)
y_norm = y - y_mean
for kernel in kernels:
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_mean + y_pred
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
# Test that GPR can deal with multi-dimensional target values
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
# Test that GPR can use externally defined optimizers.
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
# Test GPR can handle two different output-values for the same input.
for kernel in kernels:
gpr_equal_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
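# --- Added illustration (not part of the original test module) ---
# A small standalone sketch of the normalize_y equivalence exercised in
# test_y_normalization above: with a fixed kernel, a normalizing GP fitted on raw targets
# gives the same predictive mean as a non-normalizing GP fitted on centred targets.
def _demo_normalize_y_equivalence():
    import numpy as np
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF
    X_tr = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y_tr = (X_tr * np.sin(X_tr)).ravel()
    X_te = np.atleast_2d([2., 4., 5.5]).T
    kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X_tr, y_tr - y_tr.mean())
    gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True).fit(X_tr, y_tr)
    np.testing.assert_allclose(y_tr.mean() + gpr.predict(X_te), gpr_norm.predict(X_te))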
|
bsd-3-clause
|
vinodkc/spark
|
python/pyspark/pandas/plot/plotly.py
|
14
|
7646
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import TYPE_CHECKING, Union
import pandas as pd
from pyspark.pandas.plot import (
HistogramPlotBase,
name_like_string,
PandasOnSparkPlotAccessor,
BoxPlotBase,
KdePlotBase,
)
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
def plot_pandas_on_spark(data: Union["ps.DataFrame", "ps.Series"], kind: str, **kwargs):
import plotly
# pandas-on-Spark specific plots
if kind == "pie":
return plot_pie(data, **kwargs)
if kind == "hist":
return plot_histogram(data, **kwargs)
if kind == "box":
return plot_box(data, **kwargs)
if kind == "kde" or kind == "density":
return plot_kde(data, **kwargs)
# Other plots.
return plotly.plot(PandasOnSparkPlotAccessor.pandas_plot_data_map[kind](data), kind, **kwargs)
def plot_pie(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
from plotly import express
data = PandasOnSparkPlotAccessor.pandas_plot_data_map["pie"](data)
if isinstance(data, pd.Series):
pdf = data.to_frame()
return express.pie(pdf, values=pdf.columns[0], names=pdf.index, **kwargs)
elif isinstance(data, pd.DataFrame):
values = kwargs.pop("y", None)
default_names = None
if values is not None:
default_names = data.index
return express.pie(
data,
values=kwargs.pop("values", values),
names=kwargs.pop("names", default_names),
**kwargs,
)
else:
raise RuntimeError("Unexpected type: [%s]" % type(data))
def plot_histogram(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
import plotly.graph_objs as go
import pyspark.pandas as ps
bins = kwargs.get("bins", 10)
y = kwargs.get("y")
if y and isinstance(data, ps.DataFrame):
        # Note that the results here match matplotlib's behaviour. The x and y
        # handling differs from pandas' own plotly output.
data = data[y]
psdf, bins = HistogramPlotBase.prepare_hist_data(data, bins)
assert len(bins) > 2, "the number of buckets must be higher than 2."
output_series = HistogramPlotBase.compute_hist(psdf, bins)
prev = float("%.9f" % bins[0]) # to make it prettier, truncate.
text_bins = []
for b in bins[1:]:
norm_b = float("%.9f" % b)
text_bins.append("[%s, %s)" % (prev, norm_b))
prev = norm_b
text_bins[-1] = text_bins[-1][:-1] + "]" # replace ) to ] for the last bucket.
bins = 0.5 * (bins[:-1] + bins[1:])
output_series = list(output_series)
bars = []
for series in output_series:
bars.append(
go.Bar(
x=bins,
y=series,
name=name_like_string(series.name),
text=text_bins,
hovertemplate=(
"variable=" + name_like_string(series.name) + "<br>value=%{text}<br>count=%{y}"
),
)
)
fig = go.Figure(data=bars, layout=go.Layout(barmode="stack"))
fig["layout"]["xaxis"]["title"] = "value"
fig["layout"]["yaxis"]["title"] = "count"
return fig
def plot_box(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
import plotly.graph_objs as go
import pyspark.pandas as ps
if isinstance(data, ps.DataFrame):
raise RuntimeError(
"plotly does not support a box plot with pandas-on-Spark DataFrame. Use Series instead."
)
    # 'whis' isn't actually an argument in plotly (it is in matplotlib), and plotly
    # doesn't seem to expose the reach of the whiskers beyond the first and
    # third quartiles; it appears to use the default of 1.5.
whis = kwargs.pop("whis", 1.5)
# 'precision' is pandas-on-Spark specific to control precision for approx_percentile
precision = kwargs.pop("precision", 0.01)
# Plotly options
boxpoints = kwargs.pop("boxpoints", "suspectedoutliers")
notched = kwargs.pop("notched", False)
if boxpoints not in ["suspectedoutliers", False]:
raise ValueError(
"plotly plotting backend does not support 'boxpoints' set to '%s'. "
"Set to 'suspectedoutliers' or False." % boxpoints
)
if notched:
raise ValueError(
"plotly plotting backend does not support 'notched' set to '%s'. "
"Set to False." % notched
)
colname = name_like_string(data.name)
spark_column_name = data._internal.spark_column_name_for(data._column_label)
# Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision)
# Creates a column to flag rows as outliers or not
outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences)
# Computes min and max values of non-outliers - the whiskers
whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers)
fliers = None
if boxpoints:
fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0])
fliers = [fliers] if len(fliers) > 0 else None
fig = go.Figure()
fig.add_trace(
go.Box(
name=colname,
q1=[col_stats["q1"]],
median=[col_stats["med"]],
q3=[col_stats["q3"]],
mean=[col_stats["mean"]],
lowerfence=[whiskers[0]],
upperfence=[whiskers[1]],
y=fliers,
boxpoints=boxpoints,
notched=notched,
**kwargs, # this is for workarounds. Box takes different options from express.box.
)
)
fig["layout"]["xaxis"]["title"] = colname
fig["layout"]["yaxis"]["title"] = "value"
return fig
def plot_kde(data: Union["ps.DataFrame", "ps.Series"], **kwargs):
from plotly import express
import pyspark.pandas as ps
if isinstance(data, ps.DataFrame) and "color" not in kwargs:
kwargs["color"] = "names"
psdf = KdePlotBase.prepare_kde_data(data)
sdf = psdf._internal.spark_frame
data_columns = psdf._internal.data_spark_columns
ind = KdePlotBase.get_ind(sdf.select(*data_columns), kwargs.pop("ind", None))
bw_method = kwargs.pop("bw_method", None)
pdfs = []
for label in psdf._internal.column_labels:
pdfs.append(
pd.DataFrame(
{
"Density": KdePlotBase.compute_kde(
sdf.select(psdf._internal.spark_column_for(label)),
ind=ind,
bw_method=bw_method,
),
"names": name_like_string(label),
"index": ind,
}
)
)
pdf = pd.concat(pdfs)
fig = express.line(pdf, x="index", y="Density", **kwargs)
fig["layout"]["xaxis"]["title"] = None
return fig
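# --- Added illustration (not part of the pyspark module above) ---
# A standalone sketch of the bin-labelling trick used in plot_histogram: bin edges become
# half-open interval labels, the last interval is closed, and bars sit at bin midpoints.
def _demo_bin_labels(edges):
    labels = []
    prev = float("%.9f" % edges[0])  # truncate for prettier labels, as above
    for b in edges[1:]:
        b = float("%.9f" % b)
        labels.append("[%s, %s)" % (prev, b))
        prev = b
    labels[-1] = labels[-1][:-1] + "]"  # close the last bucket
    centers = [0.5 * (lo + hi) for lo, hi in zip(edges[:-1], edges[1:])]
    return labels, centers
# e.g. _demo_bin_labels([0.0, 1.0, 2.0]) -> (['[0.0, 1.0)', '[1.0, 2.0]'], [0.5, 1.5])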
|
apache-2.0
|
xiaohan2012/capitalization-restoration-train
|
hmm.py
|
1
|
2186
|
"""
This implementation is based on the HMM description in Chapter 8, Huang,
Acero and Hon, Spoken Language Processing and includes an extension for
training shallow HMM parsers or specialized HMMs as in Molina et al., 2002.
"""
from nltk.tag.hmm import HiddenMarkovModelTagger
from data import load_labeled_data
from cap_transform import transform_data
from util import (make_uppercase_title, make_lowercase_title, make_capitalized_title)
def main(train_data, test_data):
print "Training"
m = HiddenMarkovModelTagger.train(train_data)
print "Predicting"
predicted_labels = []
for i, sent in enumerate(test_data):
if i % 500 == 0:
print "%d / %d" %(i, len(test_data))
predicted_labels += [tag
for _, tag in m.tag(
[word for word, _ in sent]
)]
correct_labels = [tag
for sent in test_data
for _, tag in sent]
# print predicted_labels
# print correct_labels
from sklearn.metrics import classification_report
print classification_report(correct_labels, predicted_labels)
correct_n = len([1
for p, c in zip(predicted_labels, correct_labels)
if p == c])
print "Item accuracy:", float(correct_n) / len(correct_labels)
if __name__ == "__main__":
import sys
try:
oper = sys.argv[1]
except IndexError:
print "Please specify the mode"
sys.exit(-1)
oper_map = {
"upper": make_uppercase_title,
"lower": make_lowercase_title,
"cap": make_capitalized_title
}
transform_func = oper_map.get(oper)
if not transform_func:
raise ValueError("Invalid oper")
else:
print "Oper: %s" %(oper)
train_data = transform_data(list(load_labeled_data("corpus/titles_train.txt")),
transform_func)
test_data = transform_data(list(load_labeled_data("corpus/titles_test.txt")),
transform_func)
main(train_data, test_data)
|
mit
|
hotpxl/mxnet
|
example/gluon/dcgan.py
|
1
|
8007
|
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import argparse
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
import numpy as np
import logging
from datetime import datetime
import os
import time
def fill_buf(buf, i, img, shape):
    n = buf.shape[0]//shape[1]
    m = buf.shape[1]//shape[0]
    sx = (i%m)*shape[0]
    sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
return None
def visual(title, X, name):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = buff[:,:,::-1]
plt.imshow(buff)
plt.title(title)
plt.savefig(name)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use. options are cifar10 and imagenet.')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
opt = parser.parse_args()
print(opt)
logging.basicConfig(level=logging.DEBUG)
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
check_point = bool(opt.check_point)
outf = opt.outf
if not os.path.exists(outf):
os.makedirs(outf)
def transformer(data, label):
# resize to 64x64
data = mx.image.imresize(data, 64, 64)
# transpose from (64, 64, 3) to (3, 64, 64)
data = mx.nd.transpose(data, (2,0,1))
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
# if image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data, label
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size=opt.batch_size, shuffle=False)
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
# build the discriminator
netD = nn.Sequential()
with netD.name_scope():
# input is (nc) x 64 x 64
netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 32 x 32
netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 16 x 16
netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 8 x 8
netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 4 x 4
netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
# ============printing==============
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)
metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, _ in train_data:
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real_t
data = data.as_in_context(ctx)
noise = mx.nd.random_normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)
with autograd.record():
output = netD(data)
output = output.reshape((opt.batch_size, 2))
errD_real = loss(output, real_label)
metric.update([real_label,], [output,])
fake = netG(noise)
output = netD(fake.detach())
output = output.reshape((opt.batch_size, 2))
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
errD.backward()
metric.update([fake_label,], [output,])
trainerD.step(opt.batch_size)
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
with autograd.record():
output = netD(fake)
output = output.reshape((-1, 2))
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
# logging.info('speed: {} samples/s'.format(opt.batch_size / (time.time() - btic)))
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' %(mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch))
if iter % 1 == 0:
visual('gout', fake.asnumpy(), name=os.path.join(outf,'fake_img_iter_%d.png' %iter))
visual('data', data.asnumpy(), name=os.path.join(outf,'real_img_iter_%d.png' %iter))
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
logging.info('time: %f' % (time.time() - tic))
if check_point:
netG.save_params(os.path.join(outf,'generator_epoch_%d.params' %epoch))
netD.save_params(os.path.join(outf,'discriminator_epoch_%d.params' % epoch))
netG.save_params(os.path.join(outf, 'generator.params'))
netD.save_params(os.path.join(outf, 'discriminator.params'))
|
apache-2.0
|
YetAnotherTomek/egfrd
|
samples/mapk/rebind_ratio/plot_hist.py
|
6
|
6368
|
#!/usr/bin/env python
# D=1
# python plot_hist.py "." mapk3_1e-15_1_fixed_1e-1_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-2_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-3_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-4_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-5_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_1e-6_normal_ALL_reactions.rebind mapk3_1e-15_1_fixed_0_normal_ALL_reactions.rebind
# t_half = 1e-6
rfiles = ['mapk3_1e-15_0.03125_fixed_1e-6_normal_ALL_reactions.rebind',
'mapk3_1e-15_0.0625_fixed_1e-6_normal_ALL_reactions.rebind',
'mapk3_1e-15_0.25_fixed_1e-6_normal_ALL_reactions.rebind',
'mapk3_1e-15_1_fixed_1e-6_normal_ALL_reactions.rebind',
'mapk3_1e-15_4_fixed_1e-6_normal_ALL_reactions.rebind']
sfiles = []
# t_half = 1e-2
rfiles = ['mapk3_1e-15_0.03125_fixed_1e-2_normal_ALL_reactions.rebind',
'mapk3_1e-15_0.0625_fixed_1e-2_normal_ALL_reactions.rebind',
'mapk3_1e-15_0.25_fixed_1e-2_normal_ALL_reactions.rebind',
'mapk3_1e-15_1_fixed_1e-2_normal_ALL_reactions.rebind',
'mapk3_1e-15_4_fixed_1e-2_normal_ALL_reactions.rebind']
sfiles=[]
sdir = 's02/data/'
sfiles = ['model3-smallt_0.03125_1e-2_ALL_t.dat',
'model3-smallt_0.0625_1e-2_ALL_t.dat',
'model3-smallt_0.25_1e-2_ALL_t.dat',
'model3-smallt_1_1e-2_ALL_t.dat',
'model3-smallt_4_1e-2_ALL_t.dat']
from matplotlib.pylab import *
import math
import numpy
import sys
import re
import glob
def load_sfile(sfile):
sfile = sfile.replace('ALL', '*')
filelist = glob.glob(sdir + sfile)
print filelist
N = 0
data = []
for fname in filelist:
f = open(fname)
firstline = f.readline()
n = int(firstline)
#print 'N', n
d = [float(line) for line in f.readlines()]
f.close()
N += n
data.extend(d)
print 'supplementary data:', N, '(', len(data), ')'
return data, N
def plot_hist(filename, xmin, xmax, BINS, pattern=None, factor=1.0,
sfile=None):
if sfile != None:
thr = 1e-5
else:
thr = 1e-20
file = open(filename)
data=[]
for line in file.readlines():
line = line.split()
t = float(line[0])
event_type = line[1]
if t == 0:
print 'skip zero'
continue
if pattern == None or pattern.match(event_type):
data.append(t)
file.close()
data = numpy.array(data)
N = len(data)
data = data.compress(data != numpy.inf)
n, bins = numpy.histogram(numpy.log10(data),
range=(numpy.log10(thr),numpy.log10(data.max())),
bins=BINS/2, new=True)
n = n.astype(numpy.floating)
n /= float(N)
n *= factor
#x = 10**bins[:-1]
x = (10**bins[1:] + 10**bins[:-1]) / 2
dx = (10**bins[1:]- 10**bins[:-1])
y = n / dx # n+1e-10
print x, y
if sfile != None:
print sfile
sdata, sN = load_sfile(sfile)
sdata = numpy.array(sdata)
#sdata = numpy.compress(sdata <= thr,sdata)
sn, sbins = numpy.histogram(numpy.log10(sdata),
range=(numpy.log10(sdata.min()),
numpy.log10(thr)),
bins=BINS/3, new=True)
sn = sn.astype(numpy.floating)
sn /= float(sN)
sn *= factor
sx = (10**sbins[1:] + 10**sbins[:-1]) / 2
sdx = (10**sbins[1:]- 10**sbins[:-1])
sy = sn / sdx # n+1e-10
x = numpy.concatenate((sx, x))
y = numpy.concatenate((sy, y))
print N, sN, len(sdata)
return loglog(x, y)#, label=filename )
def plot_hist2(filename, xmin, xmax, N, pattern=None, factor=1.0):
file = open(filename)
data=[]
for line in file.readlines():
line = line.split()
t = float(line[0])
event_type = line[1]
if t == 0:
print 'skip zero'
continue
if pattern == None or pattern.match(event_type):
data.append(t)
data = numpy.array(data)
data.sort()
i = 0
p = 5
x = []
y = []
ld = len(data)
while i+p < ld:
slice = data[i:i+p]
min, max = slice.min(), slice.max()
x.append((min + max) / 2)
y.append(1.0 / (max - min))
i += p
y = numpy.array(y,numpy.floating)
y /= float(len(data))
y *= factor
return loglog(x, y)#, label=filename )
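# --- Added illustration (not part of the original script) ---
# A self-contained sketch of the log-binned density estimate used in plot_hist above:
# histogram log10(t), then divide the normalised counts by the linear width of each bin
# so that the result can be drawn on log-log axes as a probability density.
def _demo_log_binned_density(times, bins=50):
    times = numpy.asarray(times)
    counts, edges = numpy.histogram(numpy.log10(times), bins=bins)
    widths = 10.0 ** edges[1:] - 10.0 ** edges[:-1]  # linear bin widths
    centers = 0.5 * (10.0 ** edges[1:] + 10.0 ** edges[:-1])
    density = counts / (float(len(times)) * widths)
    return centers, density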
if __name__ == '__main__':
import numpy
BINS=50
#pattern = re.compile(sys.argv[1])
#xmin = 1e-12
xmin = 1e-8
xmax = 100
axes([.16,.16,.8,.8])
Dlist = [0.03e-12,0.06e-12,0.25e-12,1e-12, 4e-12]
lines=[]
for n, filename in enumerate(rfiles):
D = Dlist[n]
if len(sfiles) >= 1:
sfile = sfiles[n]
else:
sfile = None
sigma = 5e-9
kD = 4 * numpy.pi * sigma * D
k_a = 9.2e-20#1.6e9 / (1000*6e23)
#factor = D * (1 + (k_a / kD))
factor = 1
print 'factor', factor
line = plot_hist(filename, xmin, xmax, BINS, None, factor, sfile = sfile)
lines.append(line)
xlabel('Second association times', size=26)
ylabel('Relative frequency', size=26)
#ylabel(r'$p(t) \cdot D (1 + (k_a / kD))$', size=26)
xticks([1e-12, 1e-9, 1e-6, 1e-3, 1],
[r'${\rm 1 ps}$',
r'${\rm 1 ns}$',
r'${\rm 1 \mu s}$',
r'${\rm 1 ms}$',
r'${\rm 1 s}$'],
size=24)
yticks(size=18)
xlim(xmin, xmax)
ylim(5e-5, 5e5)
leg = legend( lines, (r'$D=0.03 \ \ {\rm \mu m^2 / s}$',
r'$D=0.06 \ \ {\rm \mu m^2 / s}$',
# # r'$D=0.13 \ \ {\rm \mu m^2 / s}$',
r'$D=0.25 \ \ {\rm \mu m^2 / s}$',
r'$D=1.0 \ \ {\rm \mu m^2 / s}$',
r'$D=4.0 \ \ {\rm \mu m^2 / s}$',
),
loc=3,
shadow=True,
pad=0.05,
labelsep=0
)
for l in leg.get_lines():
l.set_linewidth(1.5) # the legend line width
show()
|
gpl-2.0
|
pratapvardhan/scikit-learn
|
sklearn/neural_network/tests/test_mlp.py
|
46
|
18585
|
"""
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# Licence: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false)
np.seterr(all='warn')
ACTIVATION_TYPES = ["logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
def test_alpha():
    # Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
    # Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(algorithm='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.classes_ = [0, 1]
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
mlp.label_binarizer_.y_type_ = 'binary'
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
# W1grad11 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
assert_almost_equal(mlp.decision_function(X), 1.043, decimal=3)
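# --- Added illustration (not part of the original test module) ---
# A tiny standalone check of the first line of the hand-worked example above: the hidden
# activation h1 is the logistic function applied to the weighted input plus the bias.
def _demo_worked_forward_step():
    import numpy as np
    logistic = lambda z: 1.0 / (1.0 + np.exp(-z))
    h1 = logistic(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
    assert abs(h1 - 0.679178699175393) < 1e-9
    return h1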
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
algorithm='l-bfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solution.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
    # Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
    # Test that multi-output regression works as expected.
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
    # Tests that passing different classes to partial_fit raises an error.
X = [[3, 2]]
y = [0]
clf = MLPClassifier(algorithm='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
    # `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(algorithm='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(algorithm='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(algorithm='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(algorithm='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
    # Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(
algorithm='sgd').partial_fit,
X, y,
classes=[2])
# l-bfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(algorithm='l-bfgs'), 'partial_fit'))
def test_params_errors():
    # Test that invalid parameters raise a ValueError.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(algorithm='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
    # Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multi():
    # Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
    # Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(random_state=1, hidden_layer_sizes=15)
mlp.fit(X, y)
pred1 = mlp.decision_function(X)
mlp.fit(X_sparse, y)
pred2 = mlp.decision_function(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the algorithm to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(algorithm='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, algorithm='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd',
learning_rate='adaptive', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
|
bsd-3-clause
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/examples/api/histogram_path_demo.py
|
6
|
1464
|
"""
This example shows how to use a path patch to draw a bunch of
rectangles. The techniques of using lots of Rectangle instances, or
the faster method of using PolyCollections, were implemented before we
had proper paths with moveto/lineto, closepoly etc. in mpl. Now that
we have them, we can draw collections of regularly shaped objects with
homogeneous properties more efficiently with a PathCollection. This
example makes a histogram -- it's more work to set up the vertex arrays
at the outset, but it should be much faster for large numbers of
objects.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
fig = plt.figure()
ax = fig.add_subplot(111)
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 50)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
# we need a (numrects x numsides x 2) numpy array for the path helper
# function to build a compound path
XY = np.array([[left,left,right,right], [bottom,top,top,bottom]]).T
# get the Path object
barpath = path.Path.make_compound_path_from_polys(XY)
# make a patch out of it
patch = patches.PathPatch(barpath, facecolor='blue', edgecolor='gray', alpha=0.8)
ax.add_patch(patch)
# update the view limits
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
plt.show()
|
mit
|
dariocorral/panoanda
|
panoanda/tickers.py
|
1
|
2954
|
"""
Created on Sat Sep 16 18:32:01 2017
@author: dariocorral
"""
import os
import oandapy
import pandas as pd
class Tickers(object):
"""
Basic info about tickers available for OANDA trading
"""
#oanda_api private attribute
_oanda_api = oandapy.API(environment = os.environ['ENV'],
access_token = os.environ['TOKEN'])
@property
def dataframe(self):
"""
Tickers Dataframe with basic info:
:param : no params
:return : dataFrame object
* Pip value
* DisplayName
* Max Trade Units
* Base
* Quote
* Pip Decimals
"""
#Call dict OANDA API and transform to DataFrame
df = self._oanda_api.get_instruments(os.environ['ACCOUNT'])
df = df.get('instruments')
df = pd.DataFrame.from_dict(df)
base = df['instrument'].str.split('_', expand = True)
df = df.join(base)
df.set_index ('instrument',inplace = True)
#Rename columns
df.columns = (u'displayName', u'maxTradeUnits', u'pip', u'base',
u'quote')
#Change tick Value to float
df['pip'] = df['pip'].astype(float)
return df
def tick_value(self,ticker):
"""
Minimum tick value
:param: ticker
:type : string, list or tuple
:return: float or dataframe
"""
return self.dataframe.loc[ticker]['pip']
def display_name(self,ticker):
"""
ticker Display Name
:param: ticker
:type : string, list or tuple
        :return : string or dataframe
"""
return self.dataframe.loc[ticker]['displayName']
def max_trade_units(self,ticker):
"""
Max Trade Units allowed
:param: ticker
:type : string, list or tuple
:return : integer or dataframe
"""
return self.dataframe.loc[ticker]['maxTradeUnits']
def base(self,ticker):
"""
ticker base part
:param: ticker
:type : string, list or tuple
:return : string or dataframe
"""
return self.dataframe.loc[ticker]['base']
def quote(self,ticker):
"""
ticker quote part
:param: ticker
:type : string, list or tuple
:return : string or dataframe
"""
return self.dataframe.loc[ticker]['quote']
def pip_decimals(self,ticker):
"""
ticker decimals (for rounding quotes)
:param: ticker
:type : string
:return : int
"""
inverse = 1 / self.tick_value(ticker)
return str(inverse).count('0')
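# --- Added illustration (not part of the class above) ---
# A standalone sketch of the zero-counting trick in pip_decimals: counting the '0'
# characters in str(1 / pip) gives the quote precision, which for typical pip values
# comes out one decimal beyond the pip position (fractional-pip pricing). It relies on
# Python's float repr being clean for these round values.
def _demo_pip_decimals(pip):
    inverse = 1 / pip
    return str(inverse).count('0')
# e.g. _demo_pip_decimals(0.0001) -> 5 (str is '10000.0'),
#      _demo_pip_decimals(0.01)   -> 3 (str is '100.0')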
|
mit
|
noelevans/sandpit
|
boston/categorise_with_scalers.py
|
1
|
1162
|
import numpy as np
from scipy import stats
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.cross_validation import train_test_split
def main():
boston = datasets.load_boston()
y = boston.target # House prices
mean = np.mean(y)
y = y > mean # y now means is_above_average_house_price
fns = boston.feature_names
predictors = np.array([
'NOX', # Air concentration of nitric oxides
'CRIM', # Crime rate per capita
])
X_idx = np.in1d(fns, predictors)
X = boston.data[:, X_idx]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=33)
for p, x in zip(predictors, np.rollaxis(X, 1)):
print('%s vs House price - srcc: %f, p_value: %f' % (
(p, ) + stats.spearmanr(x, y)))
model = GaussianNB()
model.fit(X_train, y_train)
y_hat = model.predict(X_test)
matches = y_hat == y_test
print('Success rate: %i / %i = %f' % (
matches.sum(), matches.size, float(matches.sum()) / matches.size))
if __name__ == '__main__':
main()
|
mit
|
wlamond/scikit-learn
|
sklearn/preprocessing/data.py
|
8
|
68191
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
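# Minimal sketch of the helper above (the demo values are assumptions chosen
# for illustration): zero scales would cause division by zero for constant
# features, so they are replaced by 1 and the feature is left unchanged.
def _handle_zeros_in_scale_sketch():
    assert _handle_zeros_in_scale(0.0) == 1.0
    scale_demo = _handle_zeros_in_scale(np.array([2.0, 0.0]))
    assert np.allclose(scale_demo, [2.0, 1.0])
    return scale_demo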
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
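# Minimal usage sketch for ``scale`` (the demo array is an assumption chosen
# for illustration): each column ends up with zero mean and unit variance.
def _scale_usage_sketch():
    X_demo = np.array([[1., -1., 2.],
                       [2., 0., 0.],
                       [0., 1., -1.]])
    X_scaled = scale(X_demo)
    assert np.allclose(X_scaled.mean(axis=0), 0.)
    assert np.allclose(X_scaled.std(axis=0), 1.)
    return X_scaled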
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
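# Minimal usage sketch for MinMaxScaler (the demo data is an assumption chosen
# for illustration): each column is mapped onto [0, 1] via the documented
# X_std * (max - min) + min transformation, and inverse_transform undoes it.
def _minmax_scaler_usage_sketch():
    X_demo = np.array([[-1., 2.],
                       [-0.5, 6.],
                       [0., 10.],
                       [1., 18.]])
    scaler = MinMaxScaler()
    X_scaled = scaler.fit_transform(X_demo)
    assert np.allclose(X_scaled.min(axis=0), 0.)
    assert np.allclose(X_scaled.max(axis=0), 1.)
    assert np.allclose(scaler.inverse_transform(X_scaled), X_demo)
    return X_scaled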
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
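# Minimal usage sketch for StandardScaler (the demo data is an assumption
# chosen for illustration): the fitted mean/scale are reused on new samples.
def _standard_scaler_usage_sketch():
    X_demo = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
    scaler = StandardScaler().fit(X_demo)
    assert np.allclose(scaler.mean_, [0.5, 0.5])
    X_scaled = scaler.transform(X_demo)
    assert np.allclose(X_scaled.std(axis=0), 1.)
    # A new sample is standardized with the statistics learned above.
    return scaler.transform([[2., 2.]])      # -> [[3., 3.]]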
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
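# Minimal usage sketch for MaxAbsScaler (the demo data is an assumption chosen
# for illustration): columns are divided by their maximum absolute value, so
# the result lies in [-1, 1] and zeros stay exactly zero.
def _maxabs_scaler_usage_sketch():
    X_demo = np.array([[1., -1., 2.],
                       [2., 0., 0.],
                       [0., 1., -1.]])
    scaler = MaxAbsScaler().fit(X_demo)
    assert np.allclose(scaler.max_abs_, [2., 1., 2.])
    X_scaled = scaler.transform(X_demo)
    assert np.abs(X_scaled).max() <= 1.0
    return X_scaled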
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
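# Minimal usage sketch for RobustScaler (the demo data is an assumption chosen
# for illustration): the median/IQR statistics are barely affected by the
# extreme outlier, which is the point of using them instead of mean/std.
def _robust_scaler_usage_sketch():
    X_demo = np.array([[1.], [2.], [3.], [4.], [1000.]])
    scaler = RobustScaler().fit(X_demo)
    assert np.allclose(scaler.center_, 3.)     # the median, not the mean
    X_scaled = scaler.transform(X_demo)
    assert np.allclose(scaler.inverse_transform(X_scaled), X_demo)
    return X_scaled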
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
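# Minimal sketch of get_feature_names for a degree-2 expansion (the input is
# an assumption chosen for illustration); the names follow the rows of
# ``powers_``: 1, x0, x1, x0^2, x0 x1, x1^2.
def _polynomial_feature_names_sketch():
    X_demo = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X_demo)
    return poly.get_feature_names()   # ['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']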
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
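# Minimal usage sketch for ``normalize`` (the demo data is an assumption
# chosen for illustration): rows are rescaled to unit l2 or unit l1 norm.
def _normalize_usage_sketch():
    X_demo = np.array([[3., 4.],
                       [1., 0.]])
    X_l2 = normalize(X_demo, norm='l2')
    assert np.allclose(row_norms(X_l2), 1.)
    X_l1 = normalize(X_demo, norm='l1')
    assert np.allclose(np.abs(X_l1).sum(axis=1), 1.)
    return X_l2, X_l1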
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
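# Minimal usage sketch for ``binarize`` (the demo data and threshold are
# assumptions chosen for illustration): values above the threshold become 1,
# everything else becomes 0.
def _binarize_usage_sketch():
    X_demo = np.array([[1., -1., 2.],
                       [2., 0., 0.],
                       [0., 1., -1.]])
    X_bin = binarize(X_demo, threshold=0.5)
    assert set(np.unique(X_bin)) <= {0.0, 1.0}
    return X_bin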
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
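# Minimal sketch of what KernelCenterer computes (the demo data is an
# assumption chosen for illustration): for a linear kernel K = X X^T,
# centering K is equivalent to centering X first and then forming the kernel.
def _kernel_centerer_usage_sketch():
    X_demo = np.array([[1., 2.], [2., 0.], [0., 1.]])
    K = np.dot(X_demo, X_demo.T)
    K_centered = KernelCenterer().fit_transform(K)
    X_centered = X_demo - X_demo.mean(axis=0)
    assert np.allclose(K_centered, np.dot(X_centered, X_centered.T))
    return K_centered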
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and four samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
# i.e. less than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
|
bsd-3-clause
|
shoyer/xray
|
xarray/core/indexing.py
|
1
|
47782
|
import functools
import operator
from collections import defaultdict
from contextlib import suppress
from datetime import timedelta
from typing import Sequence
import numpy as np
import pandas as pd
from . import duck_array_ops, nputils, utils
from .pycompat import dask_array_type, integer_types
from .utils import is_dict_like
def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError('too many indices')
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key)
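# --- Illustrative sketch (editorial addition, not part of the original
# module): how expanded_indexer pads keys and expands Ellipsis. The helper
# name is hypothetical.
def _example_expanded_indexer():
    # a bare integer is padded with full slices up to ndim
    assert expanded_indexer(0, 3) == (0, slice(None), slice(None))
    # Ellipsis is replaced by exactly enough full slices
    assert expanded_indexer((0, Ellipsis, 1), 4) == (
        0, slice(None), slice(None), 1)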
def _expand_slice(slice_, size):
return np.arange(*slice_.indices(size))
def _sanitize_slice_element(x):
from .variable import Variable
from .dataarray import DataArray
if isinstance(x, (Variable, DataArray)):
x = x.values
if isinstance(x, np.ndarray):
if x.ndim != 0:
raise ValueError('cannot use non-scalar arrays in a slice for '
'xarray indexing: {}'.format(x))
x = x[()]
if isinstance(x, np.timedelta64):
# pandas does not support indexing with np.timedelta64 yet:
# https://github.com/pandas-dev/pandas/issues/20393
x = pd.Timedelta(x)
return x
def _asarray_tuplesafe(values):
"""
Convert values into a numpy array of at most 1-dimension, while preserving
tuples.
Adapted from pandas.core.common._asarray_tuplesafe
"""
if isinstance(values, tuple):
result = utils.to_0d_object_array(values)
else:
result = np.asarray(values)
if result.ndim == 2:
result = np.empty(len(values), dtype=object)
result[:] = values
return result
def _is_nested_tuple(possible_tuple):
return (isinstance(possible_tuple, tuple) and
any(isinstance(value, (tuple, list, slice))
for value in possible_tuple))
def _index_method_kwargs(method, tolerance):
# backwards compatibility for pandas<0.16 (method) or pandas<0.17
# (tolerance)
kwargs = {}
if method is not None:
kwargs['method'] = method
if tolerance is not None:
kwargs['tolerance'] = tolerance
return kwargs
def get_loc(index, label, method=None, tolerance=None):
kwargs = _index_method_kwargs(method, tolerance)
return index.get_loc(label, **kwargs)
def get_indexer_nd(index, labels, method=None, tolerance=None):
""" Call pd.Index.get_indexer(labels). """
kwargs = _index_method_kwargs(method, tolerance)
flat_labels = np.ravel(labels)
flat_indexer = index.get_indexer(flat_labels, **kwargs)
indexer = flat_indexer.reshape(labels.shape)
return indexer
def convert_label_indexer(index, label, index_name='', method=None,
tolerance=None):
"""Given a pandas.Index and labels (e.g., from __getitem__) for one
dimension, return an indexer suitable for indexing an ndarray along that
dimension. If `index` is a pandas.MultiIndex and depending on `label`,
return a new pandas.Index or pandas.MultiIndex (otherwise return None).
"""
new_index = None
if isinstance(label, slice):
if method is not None or tolerance is not None:
raise NotImplementedError(
'cannot use ``method`` argument if any indexers are '
'slice objects')
indexer = index.slice_indexer(_sanitize_slice_element(label.start),
_sanitize_slice_element(label.stop),
_sanitize_slice_element(label.step))
if not isinstance(indexer, slice):
# unlike pandas, in xarray we never want to silently convert a
# slice indexer into an array indexer
            raise KeyError('cannot represent label-based slice indexer for '
'dimension %r with a slice over integer positions; '
'the index is unsorted or non-unique' % index_name)
elif is_dict_like(label):
is_nested_vals = _is_nested_tuple(tuple(label.values()))
if not isinstance(index, pd.MultiIndex):
raise ValueError('cannot use a dict-like object for selection on '
'a dimension that does not have a MultiIndex')
elif len(label) == index.nlevels and not is_nested_vals:
indexer = index.get_loc(tuple((label[k] for k in index.names)))
else:
for k, v in label.items():
# index should be an item (i.e. Hashable) not an array-like
if isinstance(v, Sequence) and not isinstance(v, str):
raise ValueError('Vectorized selection is not '
'available along level variable: ' + k)
indexer, new_index = index.get_loc_level(
tuple(label.values()), level=tuple(label.keys()))
# GH2619. Raise a KeyError if nothing is chosen
if indexer.dtype.kind == 'b' and indexer.sum() == 0:
raise KeyError('{} not found'.format(label))
elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):
if _is_nested_tuple(label):
indexer = index.get_locs(label)
elif len(label) == index.nlevels:
indexer = index.get_loc(label)
else:
indexer, new_index = index.get_loc_level(
label, level=list(range(len(label)))
)
else:
label = (label if getattr(label, 'ndim', 1) > 1 # vectorized-indexing
else _asarray_tuplesafe(label))
if label.ndim == 0:
if isinstance(index, pd.MultiIndex):
indexer, new_index = index.get_loc_level(label.item(), level=0)
else:
indexer = get_loc(index, label.item(), method, tolerance)
elif label.dtype.kind == 'b':
indexer = label
else:
if isinstance(index, pd.MultiIndex) and label.ndim > 1:
raise ValueError('Vectorized selection is not available along '
'MultiIndex variable: ' + index_name)
indexer = get_indexer_nd(index, label, method, tolerance)
if np.any(indexer < 0):
raise KeyError('not all values found in index %r'
% index_name)
return indexer, new_index
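# --- Illustrative sketch (editorial addition, not part of the original
# module): label-based lookups are turned into positional indexers. The
# helper name is hypothetical.
def _example_convert_label_indexer():
    index = pd.Index(['a', 'b', 'c'])
    # scalar label -> integer position
    pos, new_index = convert_label_indexer(index, 'b')
    assert pos == 1 and new_index is None
    # label slice -> positional slice (the stop label is inclusive)
    pos, new_index = convert_label_indexer(index, slice('a', 'b'))
    assert pos == slice(0, 2) and new_index is None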
def get_dim_indexers(data_obj, indexers):
"""Given a xarray data object and label based indexers, return a mapping
of label indexers with only dimension names as keys.
It groups multiple level indexers given on a multi-index dimension
into a single, dictionary indexer for that dimension (Raise a ValueError
if it is not possible).
"""
invalid = [k for k in indexers
if k not in data_obj.dims and k not in data_obj._level_coords]
if invalid:
raise ValueError("dimensions or multi-index levels %r do not exist"
% invalid)
level_indexers = defaultdict(dict)
dim_indexers = {}
for key, label in indexers.items():
dim, = data_obj[key].dims
if key != dim:
# assume here multi-index level indexer
level_indexers[dim][key] = label
else:
dim_indexers[key] = label
for dim, level_labels in level_indexers.items():
if dim_indexers.get(dim, False):
raise ValueError("cannot combine multi-index level indexers "
"with an indexer for dimension %s" % dim)
dim_indexers[dim] = level_labels
return dim_indexers
def remap_label_indexers(data_obj, indexers, method=None, tolerance=None):
"""Given an xarray data object and label based indexers, return a mapping
of equivalent location based indexers. Also return a mapping of updated
pandas index objects (in case of multi-index level drop).
"""
if method is not None and not isinstance(method, str):
raise TypeError('``method`` must be a string')
pos_indexers = {}
new_indexes = {}
dim_indexers = get_dim_indexers(data_obj, indexers)
for dim, label in dim_indexers.items():
try:
index = data_obj.indexes[dim]
except KeyError:
# no index for this dimension: reuse the provided labels
if method is not None or tolerance is not None:
raise ValueError('cannot supply ``method`` or ``tolerance`` '
'when the indexed dimension does not have '
'an associated coordinate.')
pos_indexers[dim] = label
else:
idxr, new_idx = convert_label_indexer(index, label,
dim, method, tolerance)
pos_indexers[dim] = idxr
if new_idx is not None:
new_indexes[dim] = new_idx
return pos_indexers, new_indexes
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step)
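# --- Illustrative sketch (editorial addition, not part of the original
# module): composing two slices lazily gives the same selection as applying
# them one after the other. The helper name is hypothetical.
def _example_slice_slice():
    size = 20
    combined = slice_slice(slice(2, 20, 2), slice(1, None), size)
    data = np.arange(size)
    np.testing.assert_array_equal(data[combined], data[2:20:2][1:])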
def _index_indexer_1d(old_indexer, applied_indexer, size):
assert isinstance(applied_indexer, integer_types + (slice, np.ndarray))
if isinstance(applied_indexer, slice) and applied_indexer == slice(None):
# shortcut for the usual case
return old_indexer
if isinstance(old_indexer, slice):
if isinstance(applied_indexer, slice):
indexer = slice_slice(old_indexer, applied_indexer, size)
else:
indexer = _expand_slice(old_indexer, size)[applied_indexer]
else:
indexer = old_indexer[applied_indexer]
return indexer
class ExplicitIndexer:
"""Base class for explicit indexer objects.
ExplicitIndexer objects wrap a tuple of values given by their ``tuple``
property. These tuples should always have length equal to the number of
dimensions on the indexed array.
Do not instantiate BaseIndexer objects directly: instead, use one of the
sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.
"""
def __init__(self, key):
if type(self) is ExplicitIndexer: # noqa
raise TypeError('cannot instantiate base ExplicitIndexer objects')
self._key = tuple(key)
@property
def tuple(self):
return self._key
def __repr__(self):
return '{}({})'.format(type(self).__name__, self.tuple)
def as_integer_or_none(value):
return None if value is None else operator.index(value)
def as_integer_slice(value):
start = as_integer_or_none(value.start)
stop = as_integer_or_none(value.stop)
step = as_integer_or_none(value.step)
return slice(start, stop, step)
class BasicIndexer(ExplicitIndexer):
"""Tuple for basic indexing.
All elements should be int or slice objects. Indexing follows NumPy's
rules for basic indexing: each axis is independently sliced and axes
indexed with an integer are dropped from the result.
"""
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError('key must be a tuple: {!r}'.format(key))
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
else:
raise TypeError('unexpected indexer type for {}: {!r}'
.format(type(self).__name__, k))
new_key.append(k)
super(BasicIndexer, self).__init__(new_key)
class OuterIndexer(ExplicitIndexer):
"""Tuple for outer/orthogonal indexing.
All elements should be int, slice or 1-dimensional np.ndarray objects with
an integer dtype. Indexing is applied independently along each axis, and
axes indexed with an integer are dropped from the result. This type of
indexing works like MATLAB/Fortran.
"""
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError('key must be a tuple: {!r}'.format(key))
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError('invalid indexer array, does not have '
'integer dtype: {!r}'.format(k))
if k.ndim != 1:
raise TypeError('invalid indexer array for {}, must have '
                                    'exactly 1 dimension: {!r}'
.format(type(self).__name__, k))
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError('unexpected indexer type for {}: {!r}'
.format(type(self).__name__, k))
new_key.append(k)
super(OuterIndexer, self).__init__(new_key)
class VectorizedIndexer(ExplicitIndexer):
"""Tuple for vectorized indexing.
All elements should be slice or N-dimensional np.ndarray objects with an
integer dtype and the same number of dimensions. Indexing follows proposed
rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules
(including broadcasting) except sliced axes are always moved to the end:
https://github.com/numpy/numpy/pull/6256
"""
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError('key must be a tuple: {!r}'.format(key))
new_key = []
ndim = None
for k in key:
if isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError('invalid indexer array, does not have '
'integer dtype: {!r}'.format(k))
if ndim is None:
ndim = k.ndim
elif ndim != k.ndim:
ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]
raise ValueError('invalid indexer key: ndarray arguments '
'have different numbers of dimensions: {}'
.format(ndims))
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError('unexpected indexer type for {}: {!r}'
.format(type(self).__name__, k))
new_key.append(k)
super(VectorizedIndexer, self).__init__(new_key)
class ExplicitlyIndexed:
"""Mixin to mark support for Indexer subclasses in indexing."""
class ExplicitlyIndexedNDArrayMixin(utils.NDArrayMixin, ExplicitlyIndexed):
def __array__(self, dtype=None):
key = BasicIndexer((slice(None),) * self.ndim)
return np.asarray(self[key], dtype=dtype)
class ImplicitToExplicitIndexingAdapter(utils.NDArrayMixin):
"""Wrap an array, converting tuples into the indicated explicit indexer."""
def __init__(self, array, indexer_cls=BasicIndexer):
self.array = as_indexable(array)
self.indexer_cls = indexer_cls
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
key = expanded_indexer(key, self.ndim)
result = self.array[self.indexer_cls(key)]
if isinstance(result, ExplicitlyIndexed):
return type(self)(result, self.indexer_cls)
else:
# Sometimes explicitly indexed arrays return NumPy arrays or
# scalars.
return result
class LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make basic and outer indexing lazy.
"""
def __init__(self, array, key=None):
"""
Parameters
----------
array : array_like
Array like object to index.
key : ExplicitIndexer, optional
Array indexer. If provided, it is assumed to already be in
canonical expanded form.
"""
if isinstance(array, type(self)) and key is None:
# unwrap
key = array.key
array = array.array
if key is None:
key = BasicIndexer((slice(None),) * array.ndim)
self.array = as_indexable(array)
self.key = key
def _updated_key(self, new_key):
iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))
full_key = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, integer_types):
full_key.append(k)
else:
full_key.append(_index_indexer_1d(k, next(iter_new_key), size))
full_key = tuple(full_key)
if all(isinstance(k, integer_types + (slice, )) for k in full_key):
return BasicIndexer(full_key)
return OuterIndexer(full_key)
@property
def shape(self):
shape = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, slice):
shape.append(len(range(*k.indices(size))))
elif isinstance(k, np.ndarray):
shape.append(k.size)
return tuple(shape)
def __array__(self, dtype=None):
array = as_indexable(self.array)
return np.asarray(array[self.key], dtype=None)
def transpose(self, order):
return LazilyVectorizedIndexedArray(
self.array, self.key).transpose(order)
def __getitem__(self, indexer):
if isinstance(indexer, VectorizedIndexer):
array = LazilyVectorizedIndexedArray(self.array, self.key)
return array[indexer]
return type(self)(self.array, self._updated_key(indexer))
def __setitem__(self, key, value):
if isinstance(key, VectorizedIndexer):
raise NotImplementedError(
'Lazy item assignment with the vectorized indexer is not yet '
'implemented. Load your data first by .load() or compute().')
full_key = self._updated_key(key)
self.array[full_key] = value
def __repr__(self):
return ('%s(array=%r, key=%r)' %
(type(self).__name__, self.array, self.key))
class LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make vectorized indexing lazy.
"""
def __init__(self, array, key):
"""
Parameters
----------
array : array_like
Array like object to index.
key : VectorizedIndexer
"""
if isinstance(key, (BasicIndexer, OuterIndexer)):
self.key = _outer_to_vectorized_indexer(key, array.shape)
else:
self.key = _arrayize_vectorized_indexer(key, array.shape)
self.array = as_indexable(array)
@property
def shape(self):
return np.broadcast(*self.key.tuple).shape
def __array__(self, dtype=None):
return np.asarray(self.array[self.key], dtype=None)
def _updated_key(self, new_key):
return _combine_indexers(self.key, self.shape, new_key)
def __getitem__(self, indexer):
# If the indexed array becomes a scalar, return LazilyOuterIndexedArray
if all(isinstance(ind, integer_types) for ind in indexer.tuple):
key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))
return LazilyOuterIndexedArray(self.array, key)
return type(self)(self.array, self._updated_key(indexer))
def transpose(self, order):
key = VectorizedIndexer(tuple(
k.transpose(order) for k in self.key.tuple))
return type(self)(self.array, key)
def __setitem__(self, key, value):
raise NotImplementedError(
'Lazy item assignment with the vectorized indexer is not yet '
'implemented. Load your data first by .load() or compute().')
def __repr__(self):
return ('%s(array=%r, key=%r)' %
(type(self).__name__, self.array, self.key))
def _wrap_numpy_scalars(array):
"""Wrap NumPy scalars in 0d arrays."""
if np.isscalar(array):
return np.array(array)
else:
return array
class CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin):
def __init__(self, array):
self.array = as_indexable(array)
self._copied = False
def _ensure_copied(self):
if not self._copied:
self.array = as_indexable(np.array(self.array))
self._copied = True
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self._ensure_copied()
self.array[key] = value
class MemoryCachedArray(ExplicitlyIndexedNDArrayMixin):
def __init__(self, array):
self.array = _wrap_numpy_scalars(as_indexable(array))
def _ensure_cached(self):
if not isinstance(self.array, NumpyIndexingAdapter):
self.array = NumpyIndexingAdapter(np.asarray(self.array))
def __array__(self, dtype=None):
self._ensure_cached()
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self.array[key] = value
def as_indexable(array):
"""
    Always return an ExplicitlyIndexed subclass, so that vectorized
    indexing is always possible with the returned object.
"""
if isinstance(array, ExplicitlyIndexed):
return array
if isinstance(array, np.ndarray):
return NumpyIndexingAdapter(array)
if isinstance(array, pd.Index):
return PandasIndexAdapter(array)
if isinstance(array, dask_array_type):
return DaskIndexingAdapter(array)
raise TypeError('Invalid array type: {}'.format(type(array)))
def _outer_to_vectorized_indexer(key, shape):
"""Convert an OuterIndexer into an vectorized indexer.
Parameters
----------
key : Outer/Basic Indexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
VectorizedIndexer
Tuple suitable for use to index a NumPy array with vectorized indexing.
Each element is an array: broadcasting them together gives the shape
of the result.
"""
key = key.tuple
n_dim = len([k for k in key if not isinstance(k, integer_types)])
i_dim = 0
new_key = []
for k, size in zip(key, shape):
if isinstance(k, integer_types):
new_key.append(np.array(k).reshape((1,) * n_dim))
else: # np.ndarray or slice
if isinstance(k, slice):
k = np.arange(*k.indices(size))
assert k.dtype.kind in {'i', 'u'}
shape = [(1,) * i_dim + (k.size, ) +
(1,) * (n_dim - i_dim - 1)]
new_key.append(k.reshape(*shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
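# --- Illustrative sketch (editorial addition, not part of the original
# module): an outer (orthogonal) key is turned into broadcastable arrays
# whose common shape is the shape of the indexing result. The helper name
# is hypothetical.
def _example_outer_to_vectorized():
    key = OuterIndexer((np.array([0, 2]), slice(0, 3), 1))
    vkey = _outer_to_vectorized_indexer(key, shape=(4, 5, 6))
    # the array and the expanded slice broadcast to the result shape (2, 3);
    # the integer contributes a size-1 axis that is broadcast away
    assert np.broadcast(*vkey.tuple).shape == (2, 3)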
def _outer_to_numpy_indexer(key, shape):
"""Convert an OuterIndexer into an indexer for NumPy.
Parameters
----------
key : Basic/OuterIndexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
tuple
Tuple suitable for use to index a NumPy array.
"""
if len([k for k in key.tuple if not isinstance(k, slice)]) <= 1:
# If there is only one vector and all others are slice,
# it can be safely used in mixed basic/advanced indexing.
# Boolean index should already be converted to integer array.
return key.tuple
else:
return _outer_to_vectorized_indexer(key, shape).tuple
def _combine_indexers(old_key, shape, new_key):
""" Combine two indexers.
Parameters
----------
old_key: ExplicitIndexer
The first indexer for the original array
shape: tuple of ints
Shape of the original array to be indexed by old_key
new_key:
The second indexer for indexing original[old_key]
"""
if not isinstance(old_key, VectorizedIndexer):
old_key = _outer_to_vectorized_indexer(old_key, shape)
if len(old_key.tuple) == 0:
return new_key
new_shape = np.broadcast(*old_key.tuple).shape
if isinstance(new_key, VectorizedIndexer):
new_key = _arrayize_vectorized_indexer(new_key, new_shape)
else:
new_key = _outer_to_vectorized_indexer(new_key, new_shape)
return VectorizedIndexer(tuple(o[new_key.tuple] for o in
np.broadcast_arrays(*old_key.tuple)))
class IndexingSupport: # could inherit from enum.Enum on Python 3
# for backends that support only basic indexer
BASIC = 'BASIC'
# for backends that support basic / outer indexer
OUTER = 'OUTER'
# for backends that support outer indexer including at most 1 vector.
OUTER_1VECTOR = 'OUTER_1VECTOR'
# for backends that support full vectorized indexer.
VECTORIZED = 'VECTORIZED'
def explicit_indexing_adapter(
key, shape, indexing_support, raw_indexing_method):
"""Support explicit indexing by delegating to a raw indexing method.
Outer and/or vectorized indexers are supported by indexing a second time
with a NumPy array.
Parameters
----------
key : ExplicitIndexer
Explicit indexing object.
shape : Tuple[int, ...]
Shape of the indexed array.
indexing_support : IndexingSupport enum
Form of indexing supported by raw_indexing_method.
raw_indexing_method: callable
Function (like ndarray.__getitem__) that when called with indexing key
in the form of a tuple returns an indexed array.
Returns
-------
Indexing result, in the form of a duck numpy-array.
"""
raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)
result = raw_indexing_method(raw_key.tuple)
if numpy_indices.tuple:
# index the loaded np.ndarray
result = NumpyIndexingAdapter(np.asarray(result))[numpy_indices]
return result
def decompose_indexer(indexer, shape, indexing_support):
if isinstance(indexer, VectorizedIndexer):
return _decompose_vectorized_indexer(indexer, shape, indexing_support)
if isinstance(indexer, (BasicIndexer, OuterIndexer)):
return _decompose_outer_indexer(indexer, shape, indexing_support)
raise TypeError('unexpected key type: {}'.format(indexer))
def _decompose_slice(key, size):
""" convert a slice to successive two slices. The first slice always has
a positive step.
"""
start, stop, step = key.indices(size)
if step > 0:
# If key already has a positive step, use it as is in the backend
return key, slice(None)
else:
# determine stop precisely for step > 1 case
# e.g. [98:2:-2] -> [98:3:-2]
stop = start + int((stop - start - 1) / step) * step + 1
start, stop = stop + 1, start + 1
return slice(start, stop, -step), slice(None, None, -1)
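# --- Illustrative sketch (editorial addition, not part of the original
# module): a negative-step slice is decomposed into a positive-step slice
# for the backend plus a reversal applied in memory. The helper name is
# hypothetical.
def _example_decompose_slice():
    size = 100
    backend_slice, np_slice = _decompose_slice(slice(98, 2, -2), size)
    data = np.arange(size)
    np.testing.assert_array_equal(data[backend_slice][np_slice],
                                  data[98:2:-2])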
def _decompose_vectorized_indexer(indexer, shape, indexing_support):
"""
    Decompose a vectorized indexer into two successive indexers: the first
    indexer is used to index the backend array, while the second one is used
    to index the loaded, in-memory np.ndarray.
Parameters
----------
indexer: VectorizedIndexer
    indexing_support: one of the IndexingSupport entries
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
    As an example, let us consider indexing a few elements of a backend array
    with the vectorized indexer ([0, 3, 1], [2, 3, 2]).
    Even if the backend array only supports outer indexing, it is more
    efficient to load a sub-slice of the array than to load the entire array:
>>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # vectorized indexing for on-memory np.ndarray.
"""
assert isinstance(indexer, VectorizedIndexer)
if indexing_support is IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
backend_indexer = []
np_indexer = []
# convert negative indices
indexer = [np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k
for k, s in zip(indexer.tuple, shape)]
for k, s in zip(indexer, shape):
if isinstance(k, slice):
# If it is a slice, then we will slice it as-is
# (but make its step positive) in the backend,
# and then use all of it (slice(None)) for the in-memory portion.
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
else:
            # If it is a (multidimensional) np.ndarray, just pick up the keys
            # actually used, without duplication, and store them as a 1d np.ndarray.
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
backend_indexer = OuterIndexer(tuple(backend_indexer))
np_indexer = VectorizedIndexer(tuple(np_indexer))
if indexing_support is IndexingSupport.OUTER:
return backend_indexer, np_indexer
# If the backend does not support outer indexing,
# backend_indexer (OuterIndexer) is also decomposed.
backend_indexer, np_indexer1 = _decompose_outer_indexer(
backend_indexer, shape, indexing_support)
np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)
return backend_indexer, np_indexer
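# --- Illustrative sketch (editorial addition, not part of the original
# module): for a backend that only supports outer indexing, the vectorized
# key is split into a de-duplicated outer key (loaded from the backend) and
# an in-memory vectorized key. The helper name is hypothetical.
def _example_decompose_vectorized():
    data = np.arange(25).reshape(5, 5)
    key = VectorizedIndexer((np.array([0, 3, 1]), np.array([2, 3, 2])))
    backend_key, np_key = _decompose_vectorized_indexer(
        key, data.shape, IndexingSupport.OUTER)
    loaded = NumpyIndexingAdapter(data)[backend_key]
    result = NumpyIndexingAdapter(np.asarray(loaded))[np_key]
    np.testing.assert_array_equal(result, data[[0, 3, 1], [2, 3, 2]])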
def _decompose_outer_indexer(indexer, shape, indexing_support):
"""
    Decompose an outer indexer into two successive indexers: the first
    indexer is used to index the backend array, while the second one is used
    to index the loaded, in-memory np.ndarray.
Parameters
----------
    indexer: OuterIndexer or BasicIndexer
indexing_support: One of the entries of IndexingSupport
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
    As an example, let us consider indexing a few elements of a backend array
    with an orthogonal indexer ([0, 3, 1], [2, 3, 2]).
    Even if the backend array only supports basic indexing, it is more
    efficient to load a sub-slice of the array than to load the entire array:
>>> backend_indexer = BasicIndexer(slice(0, 3), slice(2, 3))
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = OuterIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # outer indexing for on-memory np.ndarray.
"""
if indexing_support == IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
assert isinstance(indexer, (OuterIndexer, BasicIndexer))
backend_indexer = []
np_indexer = []
# make indexer positive
pos_indexer = []
for k, s in zip(indexer.tuple, shape):
if isinstance(k, np.ndarray):
pos_indexer.append(np.where(k < 0, k + s, k))
elif isinstance(k, integer_types) and k < 0:
pos_indexer.append(k + s)
else:
pos_indexer.append(k)
indexer = pos_indexer
if indexing_support is IndexingSupport.OUTER_1VECTOR:
        # some backends, such as h5py, support only one vector in the indexer.
# We choose the most efficient axis
gains = [(np.max(k) - np.min(k) + 1.0) / len(np.unique(k))
if isinstance(k, np.ndarray) else 0 for k in indexer]
array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None
for i, (k, s) in enumerate(zip(indexer, shape)):
if isinstance(k, np.ndarray) and i != array_index:
                # np.ndarray key is converted to a slice that covers all the
                # entries selected by this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, np.ndarray):
# Remove duplicates and sort them in the increasing order
pkey, ekey = np.unique(k, return_inverse=True)
backend_indexer.append(pkey)
np_indexer.append(ekey)
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (OuterIndexer(tuple(backend_indexer)),
OuterIndexer(tuple(np_indexer)))
if indexing_support == IndexingSupport.OUTER:
for k, s in zip(indexer, shape):
if isinstance(k, slice):
# slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
elif isinstance(k, integer_types):
backend_indexer.append(k)
elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():
backend_indexer.append(k)
np_indexer.append(slice(None))
else:
# Remove duplicates and sort them in the increasing order
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
return (OuterIndexer(tuple(backend_indexer)),
OuterIndexer(tuple(np_indexer)))
# basic indexer
assert indexing_support == IndexingSupport.BASIC
for k, s in zip(indexer, shape):
if isinstance(k, np.ndarray):
            # np.ndarray key is converted to a slice that covers all the
            # entries selected by this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (BasicIndexer(tuple(backend_indexer)),
OuterIndexer(tuple(np_indexer)))
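# --- Illustrative sketch (editorial addition, not part of the original
# module): for a basic-indexing-only backend, an outer key is reduced to
# covering slices plus an in-memory outer key. The helper name is
# hypothetical.
def _example_decompose_outer():
    data = np.arange(25).reshape(5, 5)
    key = OuterIndexer((np.array([0, 3, 1]), np.array([2, 3, 2])))
    backend_key, np_key = _decompose_outer_indexer(
        key, data.shape, IndexingSupport.BASIC)
    loaded = NumpyIndexingAdapter(data)[backend_key]
    result = NumpyIndexingAdapter(np.asarray(loaded))[np_key]
    np.testing.assert_array_equal(result, data[np.ix_([0, 3, 1], [2, 3, 2])])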
def _arrayize_vectorized_indexer(indexer, shape):
""" Return an identical vindex but slices are replaced by arrays """
slices = [v for v in indexer.tuple if isinstance(v, slice)]
if len(slices) == 0:
return indexer
arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)]
n_dim = arrays[0].ndim if len(arrays) > 0 else 0
i_dim = 0
new_key = []
for v, size in zip(indexer.tuple, shape):
if isinstance(v, np.ndarray):
new_key.append(np.reshape(v, v.shape + (1, ) * len(slices)))
else: # slice
shape = ((1,) * (n_dim + i_dim) + (-1,) +
(1,) * (len(slices) - i_dim - 1))
new_key.append(np.arange(*v.indices(size)).reshape(shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
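# --- Illustrative sketch (editorial addition, not part of the original
# module): slices inside a vectorized key are expanded into arrays that
# broadcast against the existing array keys. The helper name is
# hypothetical.
def _example_arrayize_vectorized():
    key = VectorizedIndexer((np.array([0, 2]), slice(0, 3)))
    arrayized = _arrayize_vectorized_indexer(key, shape=(3, 4))
    assert all(isinstance(k, np.ndarray) for k in arrayized.tuple)
    assert np.broadcast(*arrayized.tuple).shape == (2, 3)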
def _dask_array_with_chunks_hint(array, chunks):
"""Create a dask array using the chunks hint for dimensions of size > 1."""
import dask.array as da
if len(chunks) < array.ndim:
raise ValueError('not enough chunks in hint')
new_chunks = []
for chunk, size in zip(chunks, array.shape):
new_chunks.append(chunk if size > 1 else (1,))
return da.from_array(array, new_chunks)
def _logical_any(args):
return functools.reduce(operator.or_, args)
def _masked_result_drop_slice(key, chunks_hint=None):
key = (k for k in key if not isinstance(k, slice))
if chunks_hint is not None:
key = [_dask_array_with_chunks_hint(k, chunks_hint)
if isinstance(k, np.ndarray) else k
for k in key]
return _logical_any(k == -1 for k in key)
def create_mask(indexer, shape, chunks_hint=None):
"""Create a mask for indexing with a fill-value.
Parameters
----------
indexer : ExplicitIndexer
Indexer with -1 in integer or ndarray value to indicate locations in
the result that should be masked.
shape : tuple
Shape of the array being indexed.
chunks_hint : tuple, optional
Optional tuple indicating desired chunks for the result. If provided,
used as a hint for chunks on the resulting dask. Must have a hint for
each dimension on the result array.
Returns
-------
mask : bool, np.ndarray or dask.array.Array with dtype=bool
Dask array if chunks_hint is provided, otherwise a NumPy array. Has the
same shape as the indexing result.
"""
if isinstance(indexer, OuterIndexer):
key = _outer_to_vectorized_indexer(indexer, shape).tuple
assert not any(isinstance(k, slice) for k in key)
mask = _masked_result_drop_slice(key, chunks_hint)
elif isinstance(indexer, VectorizedIndexer):
key = indexer.tuple
base_mask = _masked_result_drop_slice(key, chunks_hint)
slice_shape = tuple(np.arange(*k.indices(size)).size
for k, size in zip(key, shape)
if isinstance(k, slice))
expanded_mask = base_mask[
(Ellipsis,) + (np.newaxis,) * len(slice_shape)]
mask = duck_array_ops.broadcast_to(
expanded_mask, base_mask.shape + slice_shape)
elif isinstance(indexer, BasicIndexer):
mask = any(k == -1 for k in indexer.tuple)
else:
raise TypeError('unexpected key type: {}'.format(type(indexer)))
return mask
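# --- Illustrative sketch (editorial addition, not part of the original
# module): -1 entries in an indexer mark positions that should be masked /
# filled in the indexing result. The helper name is hypothetical.
def _example_create_mask():
    indexer = OuterIndexer((np.array([0, -1, 2]),))
    mask = create_mask(indexer, shape=(5,))
    np.testing.assert_array_equal(mask, [False, True, False])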
def _posify_mask_subindexer(index):
"""Convert masked indices in a flat array to the nearest unmasked index.
Parameters
----------
index : np.ndarray
One dimensional ndarray with dtype=int.
Returns
-------
np.ndarray
One dimensional ndarray with all values equal to -1 replaced by an
adjacent non-masked element.
"""
masked = index == -1
unmasked_locs = np.flatnonzero(~masked)
if not unmasked_locs.size:
# indexing unmasked_locs is invalid
return np.zeros_like(index)
masked_locs = np.flatnonzero(masked)
prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1)
new_index = index.copy()
new_index[masked_locs] = index[unmasked_locs[prev_value]]
return new_index
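# --- Illustrative sketch (editorial addition, not part of the original
# module): masked (-1) positions are replaced by an adjacent unmasked index
# (the preceding one where available). The helper name is hypothetical.
def _example_posify_mask_subindexer():
    index = np.array([0, -1, 3, -1])
    np.testing.assert_array_equal(_posify_mask_subindexer(index),
                                  [0, 0, 3, 3])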
def posify_mask_indexer(indexer):
"""Convert masked values (-1) in an indexer to nearest unmasked values.
This routine is useful for dask, where it can be much faster to index
adjacent points than arbitrary points from the end of an array.
Parameters
----------
indexer : ExplicitIndexer
Input indexer.
Returns
-------
ExplicitIndexer
Same type of input, with all values in ndarray keys equal to -1
replaced by an adjacent non-masked element.
"""
key = tuple(_posify_mask_subindexer(k.ravel()).reshape(k.shape)
if isinstance(k, np.ndarray) else k
for k in indexer.tuple)
return type(indexer)(key)
class NumpyIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a NumPy array to use explicit indexing."""
def __init__(self, array):
        # In NumpyIndexingAdapter we only allow storing a bare np.ndarray
if not isinstance(array, np.ndarray):
raise TypeError('NumpyIndexingAdapter only wraps np.ndarray. '
'Trying to wrap {}'.format(type(array)))
self.array = array
def _indexing_array_and_key(self, key):
if isinstance(key, OuterIndexer):
array = self.array
key = _outer_to_numpy_indexer(key, self.array.shape)
elif isinstance(key, VectorizedIndexer):
array = nputils.NumpyVIndexAdapter(self.array)
key = key.tuple
elif isinstance(key, BasicIndexer):
array = self.array
# We want 0d slices rather than scalars. This is achieved by
# appending an ellipsis (see
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes). # noqa
key = key.tuple + (Ellipsis,)
else:
raise TypeError('unexpected key type: {}'.format(type(key)))
return array, key
def transpose(self, order):
return self.array.transpose(order)
def __getitem__(self, key):
array, key = self._indexing_array_and_key(key)
return array[key]
def __setitem__(self, key, value):
array, key = self._indexing_array_and_key(key)
array[key] = value
class DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a dask array to support explicit indexing."""
def __init__(self, array):
""" This adapter is created in Variable.__getitem__ in
Variable._broadcast_indexes.
"""
self.array = array
def __getitem__(self, key):
if isinstance(key, BasicIndexer):
return self.array[key.tuple]
elif isinstance(key, VectorizedIndexer):
return self.array.vindex[key.tuple]
else:
assert isinstance(key, OuterIndexer)
key = key.tuple
try:
return self.array[key]
except NotImplementedError:
# manual orthogonal indexing.
# TODO: port this upstream into dask in a saner way.
value = self.array
for axis, subkey in reversed(list(enumerate(key))):
value = value[(slice(None),) * axis + (subkey,)]
return value
def __setitem__(self, key, value):
raise TypeError("this variable's data is stored in a dask array, "
'which does not support item assignment. To '
'assign to this variable, you must first load it '
'into memory explicitly using the .load() '
'method or accessing its .values attribute.')
def transpose(self, order):
return self.array.transpose(order)
class PandasIndexAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a pandas.Index to preserve dtypes and handle explicit indexing."""
def __init__(self, array, dtype=None):
self.array = utils.safe_cast_to_index(array)
if dtype is None:
if isinstance(array, pd.PeriodIndex):
dtype = np.dtype('O')
elif hasattr(array, 'categories'):
# category isn't a real numpy dtype
dtype = array.categories.dtype
elif not utils.is_valid_numpy_dtype(array.dtype):
dtype = np.dtype('O')
else:
dtype = array.dtype
self._dtype = dtype
@property
def dtype(self):
return self._dtype
def __array__(self, dtype=None):
if dtype is None:
dtype = self.dtype
array = self.array
if isinstance(array, pd.PeriodIndex):
with suppress(AttributeError):
# this might not be public API
array = array.astype('object')
return np.asarray(array.values, dtype=dtype)
@property
def shape(self):
# .shape is broken on pandas prior to v0.15.2
return (len(self.array),)
def __getitem__(self, indexer):
key = indexer.tuple
if isinstance(key, tuple) and len(key) == 1:
# unpack key so it can index a pandas.Index object (pandas.Index
# objects don't like tuples)
key, = key
if getattr(key, 'ndim', 0) > 1: # Return np-array if multidimensional
return NumpyIndexingAdapter(self.array.values)[indexer]
result = self.array[key]
if isinstance(result, pd.Index):
result = PandasIndexAdapter(result, dtype=self.dtype)
else:
# result is a scalar
if result is pd.NaT:
# work around the impossibility of casting NaT with asarray
                # note: it probably would be better in general to return
                # a pd.Timestamp rather than np.datetime64, but this is
                # easier (for now)
result = np.datetime64('NaT', 'ns')
elif isinstance(result, timedelta):
result = np.timedelta64(getattr(result, 'value', result), 'ns')
elif isinstance(result, pd.Timestamp):
# Work around for GH: pydata/xarray#1932 and numpy/numpy#10668
# numpy fails to convert pd.Timestamp to np.datetime64[ns]
result = np.asarray(result.to_datetime64())
elif self.dtype != object:
result = np.asarray(result, dtype=self.dtype)
# as for numpy.ndarray indexing, we always want the result to be
# a NumPy array.
result = utils.to_0d_array(result)
return result
def transpose(self, order):
return self.array # self.array should be always one-dimensional
def __repr__(self):
return ('%s(array=%r, dtype=%r)'
% (type(self).__name__, self.array, self.dtype))
|
apache-2.0
|
Agent007/deepchem
|
deepchem/models/tests/test_reload.py
|
2
|
1376
|
"""
Test reload for trained models.
"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import unittest
import tempfile
import numpy as np
import deepchem as dc
import tensorflow as tf
from sklearn.ensemble import RandomForestClassifier
class TestReload(unittest.TestCase):
def test_sklearn_reload(self):
"""Test that trained model can be reloaded correctly."""
n_samples = 10
n_features = 3
n_tasks = 1
# Generate dummy dataset
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.random.randint(2, size=(n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
sklearn_model = RandomForestClassifier()
model_dir = tempfile.mkdtemp()
model = dc.models.SklearnModel(sklearn_model, model_dir)
# Fit trained model
model.fit(dataset)
model.save()
# Load trained model
reloaded_model = dc.models.SklearnModel(None, model_dir)
reloaded_model.reload()
# Eval model on train
scores = reloaded_model.evaluate(dataset, [classification_metric])
assert scores[classification_metric.name] > .9
|
mit
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/mpl_toolkits/axes_grid1/axes_divider.py
|
1
|
29627
|
"""
The axes_divider module provides helper classes to adjust the positions of
multiple axes at drawing time.
Divider: this is the class that calculates the axes position. It divides
the given rectangular area into several sub-rectangles. You initialize the
divider by setting the horizontal and vertical lists of sizes that the
division will be based on. You then use the new_locator method, whose
return value is a callable object that can be used to set the axes_locator
of the axes.
"""
import matplotlib.transforms as mtransforms
from matplotlib.axes import SubplotBase
from . import axes_size as Size
class Divider(object):
"""
    This is the class that calculates the axes position. It divides
    the given rectangular area into several sub-rectangles. You
    initialize the divider by setting the horizontal and vertical
    lists of sizes (:mod:`mpl_toolkits.axes_grid.axes_size`) that the
    division will be based on. You then use the new_locator method to
    create a callable object that can be used as the axes_locator of
    the axes.
"""
def __init__(self, fig, pos, horizontal, vertical, aspect=None, anchor="C"):
"""
:param fig: matplotlib figure
:param pos: position (tuple of 4 floats) of the rectangle that
will be divided.
:param horizontal: list of sizes
(:mod:`~mpl_toolkits.axes_grid.axes_size`)
for horizontal division
:param vertical: list of sizes
(:mod:`~mpl_toolkits.axes_grid.axes_size`)
for vertical division
        :param aspect: if True, the overall rectangular area is reduced
                       so that one unit of relative size corresponds to the
                       same physical length horizontally and vertically.
:param anchor: Determine how the reduced rectangle is placed
when aspect is True.
"""
self._fig = fig
self._pos = pos
self._horizontal = horizontal
self._vertical = vertical
self._anchor = anchor
self._aspect = aspect
self._xrefindex = 0
self._yrefindex = 0
self._locator = None
def get_horizontal_sizes(self, renderer):
return [s.get_size(renderer) for s in self.get_horizontal()]
def get_vertical_sizes(self, renderer):
return [s.get_size(renderer) for s in self.get_vertical()]
def get_vsize_hsize(self):
from .axes_size import AddList
vsize = AddList(self.get_vertical())
hsize = AddList(self.get_horizontal())
return vsize, hsize
@staticmethod
def _calc_k(l, total_size):
rs_sum, as_sum = 0., 0.
for _rs, _as in l:
rs_sum += _rs
as_sum += _as
if rs_sum != 0.:
k = (total_size - as_sum) / rs_sum
return k
else:
return 0.
@staticmethod
def _calc_offsets(l, k):
offsets = [0.]
#for s in l:
for _rs, _as in l:
#_rs, _as = s.get_size(renderer)
offsets.append(offsets[-1] + _rs*k + _as)
return offsets
def set_position(self, pos):
"""
set the position of the rectangle.
:param pos: position (tuple of 4 floats) of the rectangle that
will be divided.
"""
self._pos = pos
def get_position(self):
"return the position of the rectangle."
return self._pos
def set_anchor(self, anchor):
"""
:param anchor: anchor position
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in list(mtransforms.Bbox.coefs.keys()) or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
                             ', '.join(list(mtransforms.Bbox.coefs.keys())))
def get_anchor(self):
"return the anchor"
return self._anchor
def set_horizontal(self, h):
"""
        :param h: list of sizes
            (:mod:`~mpl_toolkits.axes_grid.axes_size`)
            for horizontal division.
"""
self._horizontal = h
def get_horizontal(self):
"return horizontal sizes"
return self._horizontal
def set_vertical(self, v):
"""
        :param v: list of sizes
            (:mod:`~mpl_toolkits.axes_grid.axes_size`)
            for vertical division.
"""
self._vertical = v
def get_vertical(self):
"return vertical sizes"
return self._vertical
def set_aspect(self, aspect=False):
"""
        :param aspect: True or False
"""
self._aspect = aspect
def get_aspect(self):
"return aspect"
return self._aspect
def set_locator(self, _locator):
self._locator = _locator
def get_locator(self):
return self._locator
def get_position_runtime(self, ax, renderer):
if self._locator is None:
return self.get_position()
else:
return self._locator(ax, renderer).bounds
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
"""
:param nx, nx1: Integers specifying the column-position of the
cell. When nx1 is None, a single nx-th column is
specified. Otherwise location of columns spanning between nx
to nx1 (but excluding nx1-th column) is specified.
:param ny, ny1: same as nx and nx1, but for row positions.
"""
figW,figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
hsizes = self.get_horizontal_sizes(renderer)
vsizes = self.get_vertical_sizes(renderer)
k_h = self._calc_k(hsizes, figW*w)
k_v = self._calc_k(vsizes, figH*h)
if self.get_aspect():
k = min(k_h, k_v)
ox = self._calc_offsets(hsizes, k)
oy = self._calc_offsets(vsizes, k)
ww = (ox[-1] - ox[0])/figW
hh = (oy[-1] - oy[0])/figH
pb = mtransforms.Bbox.from_bounds(x, y, w, h)
pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
pb1_anchored = pb1.anchored(self.get_anchor(), pb)
x0, y0 = pb1_anchored.x0, pb1_anchored.y0
else:
ox = self._calc_offsets(hsizes, k_h)
oy = self._calc_offsets(vsizes, k_v)
x0, y0 = x, y
if nx1 is None:
nx1=nx+1
if ny1 is None:
ny1=ny+1
x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
def new_locator(self, nx, ny, nx1=None, ny1=None):
"""
returns a new locator
(:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
specified cell.
:param nx, nx1: Integers specifying the column-position of the
cell. When nx1 is None, a single nx-th column is
specified. Otherwise location of columns spanning between nx
to nx1 (but excluding nx1-th column) is specified.
:param ny, ny1: same as nx and nx1, but for row positions.
"""
return AxesLocator(self, nx, ny, nx1, ny1)
def append_size(self, position, size):
if position == "left":
self._horizontal.insert(0, size)
self._xrefindex += 1
elif position == "right":
self._horizontal.append(size)
elif position == "bottom":
self._vertical.insert(0, size)
self._yrefindex += 1
elif position == "top":
self._vertical.append(size)
else:
raise ValueError("the position must be one of left, right, bottom, or top")
def add_auto_adjustable_area(self,
use_axes, pad=0.1,
adjust_dirs=["left", "right", "bottom", "top"],
):
from .axes_size import Padded, SizeFromFunc, GetExtentHelper
for d in adjust_dirs:
helper = GetExtentHelper(use_axes, d)
size = SizeFromFunc(helper)
padded_size = Padded(size, pad) # pad in inch
self.append_size(d, padded_size)
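# --- Illustrative usage sketch (editorial addition, not part of the original
# module): a minimal Divider example that places two fixed-size axes inside a
# figure rectangle, assuming the standard matplotlib API. The helper name is
# hypothetical.
def _example_divider_usage():
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(6, 3))
    rect = (0.1, 0.1, 0.8, 0.8)
    # two fixed-width columns and one fixed-height row (sizes in inches)
    horizontal = [Size.Fixed(2.0), Size.Fixed(2.0)]
    vertical = [Size.Fixed(2.0)]
    divider = Divider(fig, rect, horizontal, vertical, aspect=False)
    ax1 = fig.add_axes(rect, label="left")
    ax2 = fig.add_axes(rect, label="right")
    ax1.set_axes_locator(divider.new_locator(nx=0, ny=0))
    ax2.set_axes_locator(divider.new_locator(nx=1, ny=0))
    return fig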
class AxesLocator(object):
"""
A simple callable object, initialized with AxesDivider class,
returns the position and size of the given cell.
"""
def __init__(self, axes_divider, nx, ny, nx1=None, ny1=None):
"""
:param axes_divider: An instance of AxesDivider class.
:param nx, nx1: Integers specifying the column-position of the
cell. When nx1 is None, a single nx-th column is
specified. Otherwise location of columns spanning between nx
            to nx1 (but excluding nx1-th column) is specified.
:param ny, ny1: same as nx and nx1, but for row positions.
"""
self._axes_divider = axes_divider
_xrefindex = axes_divider._xrefindex
_yrefindex = axes_divider._yrefindex
self._nx, self._ny = nx - _xrefindex, ny - _yrefindex
if nx1 is None:
nx1 = nx+1
if ny1 is None:
ny1 = ny+1
self._nx1 = nx1 - _xrefindex
self._ny1 = ny1 - _yrefindex
def __call__(self, axes, renderer):
_xrefindex = self._axes_divider._xrefindex
_yrefindex = self._axes_divider._yrefindex
return self._axes_divider.locate(self._nx + _xrefindex,
self._ny + _yrefindex,
self._nx1 + _xrefindex,
self._ny1 + _yrefindex,
axes,
renderer)
def get_subplotspec(self):
if hasattr(self._axes_divider, "get_subplotspec"):
return self._axes_divider.get_subplotspec()
else:
return None
from matplotlib.gridspec import SubplotSpec, GridSpec
class SubplotDivider(Divider):
"""
The Divider class whose rectangle area is specified as a subplot geometry.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args)==1:
if isinstance(args[0], SubplotSpec):
self._subplotspec = args[0]
else:
try:
s = str(int(args[0]))
rows, cols, num = list(map(int, s))
except ValueError:
raise ValueError(
'Single argument to subplot must be a 3-digit integer')
self._subplotspec = GridSpec(rows, cols)[num-1]
# num - 1 for converting from MATLAB to python indexing
elif len(args)==3:
rows, cols, num = args
rows = int(rows)
cols = int(cols)
if isinstance(num, tuple) and len(num) == 2:
num = [int(n) for n in num]
self._subplotspec = GridSpec(rows, cols)[num[0]-1:num[1]]
else:
self._subplotspec = GridSpec(rows, cols)[int(num)-1]
# num - 1 for converting from MATLAB to python indexing
else:
raise ValueError('Illegal argument(s) to subplot: %s' % (args,))
# total = rows*cols
# num -= 1 # convert from matlab to python indexing
# # ie num in range(0,total)
# if num >= total:
# raise ValueError( 'Subplot number exceeds total subplots')
# self._rows = rows
# self._cols = cols
# self._num = num
# self.update_params()
# sets self.fixbox
self.update_params()
pos = self.figbox.bounds
horizontal = kwargs.pop("horizontal", [])
vertical = kwargs.pop("vertical", [])
aspect = kwargs.pop("aspect", None)
anchor = kwargs.pop("anchor", "C")
if kwargs:
raise Exception("")
Divider.__init__(self, fig, pos, horizontal, vertical,
aspect=aspect, anchor=anchor)
def get_position(self):
"return the bounds of the subplot box"
self.update_params() # update self.figbox
return self.figbox.bounds
# def update_params(self):
# 'update the subplot position from fig.subplotpars'
# rows = self._rows
# cols = self._cols
# num = self._num
# pars = self.figure.subplotpars
# left = pars.left
# right = pars.right
# bottom = pars.bottom
# top = pars.top
# wspace = pars.wspace
# hspace = pars.hspace
# totWidth = right-left
# totHeight = top-bottom
# figH = totHeight/(rows + hspace*(rows-1))
# sepH = hspace*figH
# figW = totWidth/(cols + wspace*(cols-1))
# sepW = wspace*figW
# rowNum, colNum = divmod(num, cols)
# figBottom = top - (rowNum+1)*figH - rowNum*sepH
# figLeft = left + colNum*(figW + sepW)
# self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
# figW, figH)
def update_params(self):
'update the subplot position from fig.subplotpars'
self.figbox = self.get_subplotspec().get_position(self.figure)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
return rows, cols, num1+1 # for compatibility
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, e.g., from 1,1,1 to 2,2,3'
self._subplotspec = GridSpec(numrows, numcols)[num-1]
self.update_params()
self.set_position(self.figbox)
def get_subplotspec(self):
'get the SubplotSpec instance'
return self._subplotspec
def set_subplotspec(self, subplotspec):
'set the SubplotSpec instance'
self._subplotspec = subplotspec
class AxesDivider(Divider):
"""
Divider based on the pre-existing axes.
"""
def __init__(self, axes, xref=None, yref=None):
"""
:param axes: axes
"""
self._axes = axes
        if xref is None:
self._xref = Size.AxesX(axes)
else:
self._xref = xref
        if yref is None:
self._yref = Size.AxesY(axes)
else:
self._yref = yref
Divider.__init__(self, fig=axes.get_figure(), pos=None,
horizontal=[self._xref], vertical=[self._yref],
aspect=None, anchor="C")
def _get_new_axes(self, **kwargs):
axes = self._axes
axes_class = kwargs.pop("axes_class", None)
if axes_class is None:
if isinstance(axes, SubplotBase):
axes_class = axes._axes_class
else:
axes_class = type(axes)
ax = axes_class(axes.get_figure(),
axes.get_position(original=True), **kwargs)
return ax
def new_horizontal(self, size, pad=None, pack_start=False, **kwargs):
"""
        Add a new axes on the right (or left) side of the main axes.
        :param size: The width of the new axes. An
            :mod:`~mpl_toolkits.axes_grid.axes_size` instance; if a float or
            string is given, the *from_any* function is used to create one,
            with *ref_size* set to the AxesX instance of the current axes.
        :param pad: pad between the axes. It takes the same kind of argument
            as *size*.
        :param pack_start: If False, the new axes is appended at the end
            of the list, i.e., it becomes the right-most axes. If True, it is
            inserted at the start of the list, and becomes the left-most axes.
        All extra keyword arguments are passed to the created axes.
        If *axes_class* is given, the new axes will be created as an
        instance of the given class. Otherwise, the same class as the
        main axes will be used.
"""
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad,
fraction_ref=self._xref)
if pack_start:
self._horizontal.insert(0, pad)
self._xrefindex += 1
else:
self._horizontal.append(pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size,
fraction_ref=self._xref)
if pack_start:
self._horizontal.insert(0, size)
self._xrefindex += 1
locator = self.new_locator(nx=0, ny=0)
else:
self._horizontal.append(size)
locator = self.new_locator(nx=len(self._horizontal)-1, ny=0)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def new_vertical(self, size, pad=None, pack_start=False, **kwargs):
"""
        Add a new axes on the top (or bottom) side of the main axes.
        :param size: The height of the new axes. An
            :mod:`~mpl_toolkits.axes_grid.axes_size` instance; if a float or
            string is given, the *from_any* function is used to create one,
            with *ref_size* set to the AxesY instance of the current axes.
        :param pad: pad between the axes. It takes the same kind of argument
            as *size*.
        :param pack_start: If False, the new axes is appended at the end
            of the list, i.e., it becomes the top-most axes. If True, it is
            inserted at the start of the list, and becomes the bottom-most axes.
        All extra keyword arguments are passed to the created axes.
        If *axes_class* is given, the new axes will be created as an
        instance of the given class. Otherwise, the same class as the
        main axes will be used.
"""
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad,
fraction_ref=self._yref)
if pack_start:
self._vertical.insert(0, pad)
self._yrefindex += 1
else:
self._vertical.append(pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size,
fraction_ref=self._yref)
if pack_start:
self._vertical.insert(0, size)
self._yrefindex += 1
locator = self.new_locator(nx=0, ny=0)
else:
self._vertical.append(size)
locator = self.new_locator(nx=0, ny=len(self._vertical)-1)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def append_axes(self, position, size, pad=None, add_to_figure=True,
**kwargs):
"""
        Create an axes at the given *position* with the same height
        (or width) as the main axes.
*position*
["left"|"right"|"bottom"|"top"]
*size* and *pad* should be axes_grid.axes_size compatible.
"""
if position == "left":
ax = self.new_horizontal(size, pad, pack_start=True, **kwargs)
elif position == "right":
ax = self.new_horizontal(size, pad, pack_start=False, **kwargs)
elif position == "bottom":
ax = self.new_vertical(size, pad, pack_start=True, **kwargs)
elif position == "top":
ax = self.new_vertical(size, pad, pack_start=False, **kwargs)
else:
raise ValueError("the position must be one of left, right, bottom, or top")
if add_to_figure:
self._fig.add_axes(ax)
return ax
def get_aspect(self):
if self._aspect is None:
aspect = self._axes.get_aspect()
if aspect == "auto":
return False
else:
return True
else:
return self._aspect
def get_position(self):
if self._pos is None:
bbox = self._axes.get_position(original=True)
return bbox.bounds
else:
return self._pos
def get_anchor(self):
if self._anchor is None:
return self._axes.get_anchor()
else:
return self._anchor
def get_subplotspec(self):
if hasattr(self._axes, "get_subplotspec"):
return self._axes.get_subplotspec()
else:
return None
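# --- Illustrative usage sketch (editorial addition, not part of the original
# module): appending a colorbar axes next to an image, in the same spirit as
# make_axes_locatable. The helper name is hypothetical.
def _example_axes_divider_usage():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    im = ax.imshow(np.arange(100).reshape(10, 10))
    divider = AxesDivider(ax)
    # "5%" of the main axes width, separated by a 0.1 inch pad
    cax = divider.append_axes("right", size="5%", pad=0.1)
    fig.colorbar(im, cax=cax)
    return fig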
class HBoxDivider(SubplotDivider):
def __init__(self, fig, *args, **kwargs):
SubplotDivider.__init__(self, fig, *args, **kwargs)
@staticmethod
def _determine_karray(equivalent_sizes, appended_sizes,
max_equivalent_size,
total_appended_size):
n = len(equivalent_sizes)
import numpy as np
A = np.mat(np.zeros((n+1, n+1), dtype="d"))
B = np.zeros((n+1), dtype="d")
# AxK = B
# populated A
for i, (r, a) in enumerate(equivalent_sizes):
A[i,i] = r
A[i,-1] = -1
B[i] = -a
A[-1,:-1] = [r for r, a in appended_sizes]
B[-1] = total_appended_size - sum([a for rs, a in appended_sizes])
karray_H = (A.I*np.mat(B).T).A1
karray = karray_H[:-1]
H = karray_H[-1]
if H > max_equivalent_size:
karray = (max_equivalent_size - \
np.array([a for r, a in equivalent_sizes])) \
/ np.array([r for r, a in equivalent_sizes])
return karray
@staticmethod
def _calc_offsets(appended_sizes, karray):
offsets = [0.]
#for s in l:
for (r, a), k in zip(appended_sizes, karray):
offsets.append(offsets[-1] + r*k + a)
return offsets
def new_locator(self, nx, nx1=None):
"""
returns a new locator
(:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
specified cell.
:param nx, nx1: Integers specifying the column-position of the
cell. When nx1 is None, a single nx-th column is
specified. Otherwise location of columns spanning between nx
to nx1 (but excluding nx1-th column) is specified.
"""
return AxesLocator(self, nx, 0, nx1, None)
def _locate(self, x, y, w, h,
y_equivalent_sizes, x_appended_sizes,
figW, figH):
"""
        Compute the anchored origin, the appended-cell offsets and the
        common equivalent size for the rectangle *x*, *y*, *w*, *h*
        (in figure coordinates), given the sizes to be equalized
        (*y_equivalent_sizes*) and the sizes to be appended
        (*x_appended_sizes*).  *figW* and *figH* are the figure
        dimensions in inches.  Returns the anchored origin (x0, y0),
        the offsets *ox* and the equivalent size *hh*.
"""
equivalent_sizes = y_equivalent_sizes
appended_sizes = x_appended_sizes
max_equivalent_size = figH*h
total_appended_size = figW*w
karray = self._determine_karray(equivalent_sizes, appended_sizes,
max_equivalent_size,
total_appended_size)
ox = self._calc_offsets(appended_sizes, karray)
ww = (ox[-1] - ox[0])/figW
ref_h = equivalent_sizes[0]
hh = (karray[0]*ref_h[0] + ref_h[1])/figH
pb = mtransforms.Bbox.from_bounds(x, y, w, h)
pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
pb1_anchored = pb1.anchored(self.get_anchor(), pb)
x0, y0 = pb1_anchored.x0, pb1_anchored.y0
return x0, y0, ox, hh
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
"""
:param nx, nx1: Integers specifying the column-position of the
cell. When nx1 is None, a single nx-th column is
specified. Otherwise location of columns spanning between nx
to nx1 (but excluding nx1-th column) is specified.
:param ny, ny1: same as nx and nx1, but for row positions.
"""
figW,figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
y_equivalent_sizes = self.get_vertical_sizes(renderer)
x_appended_sizes = self.get_horizontal_sizes(renderer)
x0, y0, ox, hh = self._locate(x, y, w, h,
y_equivalent_sizes, x_appended_sizes,
figW, figH)
if nx1 is None:
nx1=nx+1
x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
y1, h1 = y0, hh
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
class VBoxDivider(HBoxDivider):
"""
    A Divider that stacks its cells vertically: the vertical sizes are
    appended on top of each other while the horizontal sizes of all cells
    are kept equal.
"""
def new_locator(self, ny, ny1=None):
"""
returns a new locator
(:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
specified cell.
        :param ny, ny1: Integers specifying the row-position of the
            cell. When ny1 is None, a single ny-th row is
            specified. Otherwise location of rows spanning between ny
            to ny1 (but excluding ny1-th row) is specified.
"""
return AxesLocator(self, 0, ny, None, ny1)
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
"""
:param nx, nx1: Integers specifying the column-position of the
cell. When nx1 is None, a single nx-th column is
specified. Otherwise location of columns spanning between nx
to nx1 (but excluding nx1-th column) is specified.
:param ny, ny1: same as nx and nx1, but for row positions.
"""
figW,figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
x_equivalent_sizes = self.get_horizontal_sizes(renderer)
y_appended_sizes = self.get_vertical_sizes(renderer)
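        # Reuse HBoxDivider._locate with the roles of x and y swapped: here the
        # vertical sizes are the ones being appended and the horizontal sizes
        # are equalized, so the rectangle and figure dimensions are passed in
        # transposed order.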
y0, x0, oy, ww = self._locate(y, x, h, w,
x_equivalent_sizes, y_appended_sizes,
figH, figW)
if ny1 is None:
ny1=ny+1
x1, w1 = x0, ww
y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
class LocatableAxesBase:
def __init__(self, *kl, **kw):
self._axes_class.__init__(self, *kl, **kw)
self._locator = None
self._locator_renderer = None
def set_axes_locator(self, locator):
self._locator = locator
def get_axes_locator(self):
return self._locator
def apply_aspect(self, position=None):
if self.get_axes_locator() is None:
self._axes_class.apply_aspect(self, position)
else:
pos = self.get_axes_locator()(self, self._locator_renderer)
self._axes_class.apply_aspect(self, position=pos)
def draw(self, renderer=None, inframe=False):
self._locator_renderer = renderer
self._axes_class.draw(self, renderer, inframe)
_locatableaxes_classes = {}
def locatable_axes_factory(axes_class):
new_class = _locatableaxes_classes.get(axes_class)
if new_class is None:
new_class = type("Locatable%s" % (axes_class.__name__),
(LocatableAxesBase, axes_class),
{'_axes_class': axes_class})
_locatableaxes_classes[axes_class] = new_class
return new_class
#if hasattr(maxes.Axes, "get_axes_locator"):
# LocatableAxes = maxes.Axes
#else:
def make_axes_locatable(axes):
if not hasattr(axes, "set_axes_locator"):
new_class = locatable_axes_factory(type(axes))
axes.__class__ = new_class
divider = AxesDivider(axes)
locator = divider.new_locator(nx=0, ny=0)
axes.set_axes_locator(locator)
return divider
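# A minimal usage sketch (illustrative only, not part of this module): the
# divider returned by make_axes_locatable is commonly used to attach a colorbar
# axes next to an image plot, e.g.
#
#     divider = make_axes_locatable(ax)
#     cax = divider.append_axes("right", size="5%", pad=0.05)
#     fig.colorbar(im, cax=cax)
#
# where ``ax``, ``fig`` and ``im`` are assumed to come from the caller's code.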
def make_axes_area_auto_adjustable(ax,
use_axes=None, pad=0.1,
adjust_dirs=["left", "right", "bottom", "top"]):
divider = make_axes_locatable(ax)
if use_axes is None:
use_axes = ax
divider.add_auto_adjustable_area(use_axes=use_axes, pad=pad,
adjust_dirs=adjust_dirs)
#from matplotlib.axes import Axes
from .mpl_axes import Axes
LocatableAxes = locatable_axes_factory(Axes)
|
gpl-3.0
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/sklearn/datasets/species_distributions.py
|
7
|
7758
|
"""
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
  also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: Simplified BSD
from cStringIO import StringIO
from os import makedirs
from os.path import join
from os.path import exists
import urllib2
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6,
dtype=np.int16):
"""
load a coverage file.
This will return a numpy array of the given dtype
"""
try:
header = [F.readline() for i in range(header_length)]
except:
F = open(F)
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header['NODATA_value']
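    # Cells flagged with the grid's NODATA marker are normalized to the
    # sentinel value -9999 so callers can rely on a single missing-value
    # convention.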
    if nodata != -9999:
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
    Parameters
    ----------
F : string or file object
file object or name of file
Returns
-------
rec : np.ndarray
record array representing the data
"""
try:
names = F.readline().strip().split(',')
except:
F = open(F)
names = F.readline().strip().split(',')
rec = np.loadtxt(F, skiprows=1, delimiter=',',
dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Notes
------
This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print 'Downloading species data from %s to %s' % (SAMPLES_URL,
data_home)
X = np.load(StringIO(urllib2.urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = StringIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print 'Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home)
X = np.load(StringIO(urllib2.urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = StringIO(X[f])
print ' - converting', f
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages,
dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
|
agpl-3.0
|
kernc/scikit-learn
|
sklearn/neighbors/base.py
|
30
|
30564
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
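        # Illustrative note (not part of the library API): for
        # dist = np.array([[0., 2.]]) the 'distance' branch above yields
        # array([[1., 0.]]), because a zero-distance neighbor receives all of
        # the weight, while dist = np.array([[1., 2.]]) simply yields the
        # inverse distances array([[1., 0.5]]).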
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
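        # Every row of the graph has exactly n_neighbors stored entries, so the
        # indptr computed above is simply 0, n_neighbors, 2*n_neighbors, ...;
        # only the data values and column indices depend on the requested mode.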
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than
        the radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
|
bsd-3-clause
|
robin-lai/scikit-learn
|
sklearn/neural_network/rbm.py
|
206
|
12292
|
"""Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
    binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
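        # Bernoulli sampling: a hidden unit is switched on whenever a uniform
        # draw falls below its activation probability.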
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
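    # The free energy below has a closed form for a Bernoulli RBM:
    #   F(v) = -v . b_visible - sum_j log(1 + exp(W_j . v + b_hidden_j));
    # np.logaddexp(0, x) computes log(1 + exp(x)) in a numerically stable way,
    # so _free_energy evaluates exactly this expression.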
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
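    # _fit performs one persistent-CD (PCD/SML) update: positive statistics
    # come from the data (v_pos, h_pos), negative statistics from the
    # persistent fantasy particles (self.h_samples_ -> v_neg -> h_neg), and the
    # weight update is proportional to <v h>_data - <v h>_model, scaled by the
    # per-sample learning rate.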
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
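        # Flip one randomly chosen feature of each sample to obtain a corrupted
        # copy v_, then return n_features * log(sigmoid(F(v_) - F(v))), an
        # estimate of the log pseudo-likelihood of v.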
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
|
bsd-3-clause
|
florentchandelier/zipline
|
tests/pipeline/test_classifier.py
|
2
|
17221
|
from functools import reduce
from operator import or_
import numpy as np
import pandas as pd
from zipline.lib.labelarray import LabelArray
from zipline.pipeline import Classifier
from zipline.testing import parameter_space
from zipline.testing.fixtures import ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.numpy_utils import (
categorical_dtype,
int64_dtype,
)
from .base import BasePipelineTestCase
bytes_dtype = np.dtype('S3')
unicode_dtype = np.dtype('U3')
class ClassifierTestCase(BasePipelineTestCase):
@parameter_space(mv=[-1, 0, 1, 999])
def test_integral_isnull(self, mv):
class C(Classifier):
dtype = int64_dtype
missing_value = mv
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of missing and non-missing values.
data = np.array([[-1, 1, 0, 2],
[3, 0, 1, 0],
[-5, 0, -1, 0],
[-3, 1, 2, 2]], dtype=int64_dtype)
self.check_terms(
terms={
'isnull': c.isnull(),
'notnull': c.notnull()
},
expected={
'isnull': data == mv,
'notnull': data != mv,
},
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
@parameter_space(mv=['0', None])
def test_string_isnull(self, mv):
class C(Classifier):
dtype = categorical_dtype
missing_value = mv
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of missing and non-missing values.
raw = np.asarray(
[['', 'a', 'ab', 'ba'],
['z', 'ab', 'a', 'ab'],
['aa', 'ab', '', 'ab'],
['aa', 'a', 'ba', 'ba']],
dtype=categorical_dtype,
)
data = LabelArray(raw, missing_value=mv)
self.check_terms(
terms={
'isnull': c.isnull(),
'notnull': c.notnull()
},
expected={
'isnull': np.equal(raw, mv),
'notnull': np.not_equal(raw, mv),
},
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
@parameter_space(compval=[0, 1, 999])
def test_eq(self, compval):
class C(Classifier):
dtype = int64_dtype
missing_value = -1
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of the comparison value and other values.
data = np.array([[-1, 1, 0, 2],
[3, 0, 1, 0],
[-5, 0, -1, 0],
[-3, 1, 2, 2]], dtype=int64_dtype)
self.check_terms(
terms={
'eq': c.eq(compval),
},
expected={
'eq': (data == compval),
},
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
@parameter_space(
__fail_fast=True,
compval=['a', 'ab', 'not in the array'],
labelarray_dtype=(bytes_dtype, categorical_dtype, unicode_dtype),
)
def test_string_eq(self, compval, labelarray_dtype):
compval = labelarray_dtype.type(compval)
class C(Classifier):
dtype = categorical_dtype
missing_value = ''
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of the comparison value and other values.
data = LabelArray(
np.asarray(
[['', 'a', 'ab', 'ba'],
['z', 'ab', 'a', 'ab'],
['aa', 'ab', '', 'ab'],
['aa', 'a', 'ba', 'ba']],
dtype=labelarray_dtype,
),
missing_value='',
)
self.check_terms(
terms={
'eq': c.eq(compval),
},
expected={
'eq': (data == compval),
},
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
@parameter_space(
missing=[-1, 0, 1],
dtype_=[int64_dtype, categorical_dtype],
)
def test_disallow_comparison_to_missing_value(self, missing, dtype_):
if dtype_ == categorical_dtype:
missing = str(missing)
class C(Classifier):
dtype = dtype_
missing_value = missing
inputs = ()
window_length = 0
with self.assertRaises(ValueError) as e:
C().eq(missing)
errmsg = str(e.exception)
self.assertEqual(
errmsg,
"Comparison against self.missing_value ({v!r}) in C.eq().\n"
"Missing values have NaN semantics, so the requested comparison"
" would always produce False.\n"
"Use the isnull() method to check for missing values.".format(
v=missing,
),
)
@parameter_space(compval=[0, 1, 999], missing=[-1, 0, 999])
def test_not_equal(self, compval, missing):
class C(Classifier):
dtype = int64_dtype
missing_value = missing
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of the comparison value and other values.
data = np.array([[-1, 1, 0, 2],
[3, 0, 1, 0],
[-5, 0, -1, 0],
[-3, 1, 2, 2]], dtype=int64_dtype)
self.check_terms(
terms={
'ne': c != compval,
},
expected={
'ne': (data != compval) & (data != C.missing_value),
},
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
@parameter_space(
__fail_fast=True,
compval=['a', 'ab', '', 'not in the array'],
missing=['a', 'ab', '', 'not in the array'],
labelarray_dtype=(bytes_dtype, unicode_dtype, categorical_dtype),
)
def test_string_not_equal(self, compval, missing, labelarray_dtype):
compval = labelarray_dtype.type(compval)
class C(Classifier):
dtype = categorical_dtype
missing_value = missing
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of the comparison value and other values.
data = LabelArray(
np.asarray(
[['', 'a', 'ab', 'ba'],
['z', 'ab', 'a', 'ab'],
['aa', 'ab', '', 'ab'],
['aa', 'a', 'ba', 'ba']],
dtype=labelarray_dtype,
),
missing_value=missing,
)
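        # The comparison is carried out on the LabelArray's integer codes, so
        # both the requested value (mapped to -1 when it is absent from the
        # categories) and the classifier's missing value are excluded.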
expected = (
(data.as_int_array() != data.reverse_categories.get(compval, -1)) &
(data.as_int_array() != data.reverse_categories[C.missing_value])
)
self.check_terms(
terms={
'ne': c != compval,
},
expected={
'ne': expected,
},
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
@parameter_space(
__fail_fast=True,
compval=[u'a', u'b', u'ab', u'not in the array'],
missing=[u'a', u'ab', u'', u'not in the array'],
labelarray_dtype=(categorical_dtype, bytes_dtype, unicode_dtype),
)
def test_string_elementwise_predicates(self,
compval,
missing,
labelarray_dtype):
if labelarray_dtype == bytes_dtype:
compval = compval.encode('utf-8')
missing = missing.encode('utf-8')
startswith_re = b'^' + compval + b'.*'
endswith_re = b'.*' + compval + b'$'
substring_re = b'.*' + compval + b'.*'
else:
startswith_re = '^' + compval + '.*'
endswith_re = '.*' + compval + '$'
substring_re = '.*' + compval + '.*'
class C(Classifier):
dtype = categorical_dtype
missing_value = missing
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of the comparison value and other values.
data = LabelArray(
np.asarray(
[['', 'a', 'ab', 'ba'],
['z', 'ab', 'a', 'ab'],
['aa', 'ab', '', 'ab'],
['aa', 'a', 'ba', 'ba']],
dtype=labelarray_dtype,
),
missing_value=missing,
)
terms = {
'startswith': c.startswith(compval),
'endswith': c.endswith(compval),
'has_substring': c.has_substring(compval),
# Equivalent filters using regex matching.
'startswith_re': c.matches(startswith_re),
'endswith_re': c.matches(endswith_re),
'has_substring_re': c.matches(substring_re),
}
expected = {
'startswith': (data.startswith(compval) & (data != missing)),
'endswith': (data.endswith(compval) & (data != missing)),
'has_substring': (data.has_substring(compval) & (data != missing)),
}
for key in list(expected):
expected[key + '_re'] = expected[key]
self.check_terms(
terms=terms,
expected=expected,
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
@parameter_space(
__fail_fast=True,
container_type=(set, list, tuple, frozenset),
labelarray_dtype=(categorical_dtype, bytes_dtype, unicode_dtype),
)
def test_element_of_strings(self, container_type, labelarray_dtype):
missing = labelarray_dtype.type("not in the array")
class C(Classifier):
dtype = categorical_dtype
missing_value = missing
inputs = ()
window_length = 0
c = C()
raw = np.asarray(
[['', 'a', 'ab', 'ba'],
['z', 'ab', 'a', 'ab'],
['aa', 'ab', '', 'ab'],
['aa', 'a', 'ba', 'ba']],
dtype=labelarray_dtype,
)
data = LabelArray(raw, missing_value=missing)
choices = [
container_type(choices) for choices in [
[],
['a', ''],
['a', 'a', 'a', 'ab', 'a'],
set(data.reverse_categories) - {missing},
['random value', 'ab'],
['_' * i for i in range(30)],
]
]
def make_expected(choice_set):
return np.vectorize(choice_set.__contains__, otypes=[bool])(raw)
terms = {str(i): c.element_of(s) for i, s in enumerate(choices)}
expected = {str(i): make_expected(s) for i, s in enumerate(choices)}
self.check_terms(
terms=terms,
expected=expected,
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
def test_element_of_integral(self):
"""
Element of is well-defined for integral classifiers.
"""
class C(Classifier):
dtype = int64_dtype
missing_value = -1
inputs = ()
window_length = 0
c = C()
# There's no significance to the values here other than that they
# contain a mix of missing and non-missing values.
data = np.array([[-1, 1, 0, 2],
[3, 0, 1, 0],
[-5, 0, -1, 0],
[-3, 1, 2, 2]], dtype=int64_dtype)
terms = {}
expected = {}
for choices in [(0,), (0, 1), (0, 1, 2)]:
terms[str(choices)] = c.element_of(choices)
expected[str(choices)] = reduce(
or_,
(data == elem for elem in choices),
np.zeros_like(data, dtype=bool),
)
self.check_terms(
terms=terms,
expected=expected,
initial_workspace={c: data},
mask=self.build_mask(self.ones_mask(shape=data.shape)),
)
def test_element_of_rejects_missing_value(self):
"""
Test that element_of raises a useful error if we attempt to pass it an
array of choices that include the classifier's missing_value.
"""
missing = "not in the array"
class C(Classifier):
dtype = categorical_dtype
missing_value = missing
inputs = ()
window_length = 0
c = C()
for bad_elems in ([missing], [missing, 'random other value']):
with self.assertRaises(ValueError) as e:
c.element_of(bad_elems)
errmsg = str(e.exception)
expected = (
"Found self.missing_value ('not in the array') in choices"
" supplied to C.element_of().\n"
"Missing values have NaN semantics, so the requested"
" comparison would always produce False.\n"
"Use the isnull() method to check for missing values.\n"
"Received choices were {}.".format(bad_elems)
)
self.assertEqual(errmsg, expected)
@parameter_space(dtype_=Classifier.ALLOWED_DTYPES)
def test_element_of_rejects_unhashable_type(self, dtype_):
class C(Classifier):
dtype = dtype_
missing_value = dtype.type('1')
inputs = ()
window_length = 0
c = C()
with self.assertRaises(TypeError) as e:
c.element_of([{'a': 1}])
errmsg = str(e.exception)
expected = (
"Expected `choices` to be an iterable of hashable values,"
" but got [{'a': 1}] instead.\n"
"This caused the following error: "
"TypeError(\"unhashable type: 'dict'\",)."
)
self.assertEqual(errmsg, expected)
class TestPostProcessAndToWorkSpaceValue(ZiplineTestCase):
def test_reversability_categorical(self):
class F(Classifier):
inputs = ()
window_length = 0
dtype = categorical_dtype
missing_value = '<missing>'
f = F()
column_data = LabelArray(
np.array(
[['a', f.missing_value],
['b', f.missing_value],
['c', 'd']],
),
missing_value=f.missing_value,
)
assert_equal(
f.postprocess(column_data.ravel()),
pd.Categorical(
['a', f.missing_value, 'b', f.missing_value, 'c', 'd'],
),
)
# only include the non-missing data
pipeline_output = pd.Series(
data=['a', 'b', 'c', 'd'],
index=pd.MultiIndex.from_arrays([
[pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
pd.Timestamp('2014-01-03')],
[0, 0, 0, 1],
]),
dtype='category',
)
assert_equal(
f.to_workspace_value(pipeline_output, pd.Index([0, 1])),
column_data,
)
def test_reversability_int64(self):
class F(Classifier):
inputs = ()
window_length = 0
dtype = int64_dtype
missing_value = -1
f = F()
column_data = np.array(
[[0, f.missing_value],
[1, f.missing_value],
[2, 3]],
)
assert_equal(f.postprocess(column_data.ravel()), column_data.ravel())
# only include the non-missing data
pipeline_output = pd.Series(
data=[0, 1, 2, 3],
index=pd.MultiIndex.from_arrays([
[pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
pd.Timestamp('2014-01-03')],
[0, 0, 0, 1],
]),
dtype=int64_dtype,
)
assert_equal(
f.to_workspace_value(pipeline_output, pd.Index([0, 1])),
column_data,
)
|
apache-2.0
|
alubbock/pysb
|
pysb/tests/test_simulator_scipy.py
|
5
|
19613
|
from pysb.testing import *
import sys
import copy
import numpy as np
from pysb import Monomer, Parameter, Initial, Observable, Rule, Expression
from pysb.simulator import ScipyOdeSimulator, InconsistentParameterError
from pysb.simulator.scipyode import CythonRhsBuilder
from pysb.examples import robertson, earm_1_0, tyson_oscillator, localfunc
import unittest
import pandas as pd
class TestScipySimulatorBase(object):
@with_model
def setUp(self):
Monomer('A', ['a'])
Monomer('B', ['b'])
Parameter('ksynthA', 100)
Parameter('ksynthB', 100)
Parameter('kbindAB', 100)
Parameter('A_init', 0)
Parameter('B_init', 0)
Initial(A(a=None), A_init)
Initial(B(b=None), B_init)
Observable("A_free", A(a=None))
Observable("B_free", B(b=None))
Observable("AB_complex", A(a=1) % B(b=1))
Rule('A_synth', None >> A(a=None), ksynthA)
Rule('B_synth', None >> B(b=None), ksynthB)
Rule('AB_bind', A(a=None) + B(b=None) >> A(a=1) % B(b=1), kbindAB)
self.model = model
# Convenience shortcut for accessing model monomer objects
self.mon = lambda m: self.model.monomers[m]
# This timespan is chosen to be enough to trigger a Jacobian evaluation
# on the various solvers.
self.time = np.linspace(0, 1)
self.sim = ScipyOdeSimulator(self.model, tspan=self.time,
integrator='vode')
def tearDown(self):
self.model = None
self.time = None
self.sim = None
class TestScipySimulatorSingle(TestScipySimulatorBase):
def test_vode_solver_run(self):
"""Test vode."""
simres = self.sim.run()
assert simres._nsims == 1
@raises(ValueError)
def test_invalid_init_kwarg(self):
ScipyOdeSimulator(self.model, tspan=self.time, spam='eggs')
def test_lsoda_solver_run(self):
"""Test lsoda."""
solver_lsoda = ScipyOdeSimulator(self.model, tspan=self.time,
integrator='lsoda')
solver_lsoda.run()
def test_lsoda_jac_solver_run(self):
"""Test lsoda and analytic jacobian."""
solver_lsoda_jac = ScipyOdeSimulator(self.model, tspan=self.time,
integrator='lsoda',
use_analytic_jacobian=True)
solver_lsoda_jac.run()
def test_y0_as_list(self):
"""Test y0 with list of initial conditions"""
# Test the initials getter method before anything is changed
assert np.allclose(
self.sim.initials[0][0:2],
[ic.value.value for ic in self.model.initials]
)
initials = [10, 20, 0]
simres = self.sim.run(initials=initials)
assert np.allclose(simres.initials[0], initials)
assert np.allclose(simres.observables['A_free'][0], 10)
def test_y0_as_ndarray(self):
"""Test y0 with numpy ndarray of initial conditions"""
simres = self.sim.run(initials=np.asarray([10, 20, 0]))
assert np.allclose(simres.observables['A_free'][0], 10)
def test_y0_as_dictionary_monomer_species(self):
"""Test y0 with model-defined species."""
self.sim.initials = {self.mon('A')(a=None): 17}
base_initials = self.sim.initials
assert base_initials[0][0] == 17
simres = self.sim.run(initials={self.mon('A')(a=None): 10,
self.mon('B')(b=1) % self.mon('A')(a=1): 0,
self.mon('B')(b=None): 0})
assert np.allclose(simres.initials, [10, 0, 0])
assert np.allclose(simres.observables['A_free'][0], 10)
# Initials should reset to base values
assert np.allclose(self.sim.initials, base_initials)
def test_y0_as_dictionary_with_bound_species(self):
"""Test y0 with dynamically generated species."""
simres = self.sim.run(initials={self.mon('A')(a=None): 0,
self.mon('B')(b=1) % self.mon('A')(a=1): 100,
self.mon('B')(b=None): 0})
assert np.allclose(simres.observables['AB_complex'][0], 100)
def test_y0_as_dataframe(self):
initials_dict = {self.mon('A')(a=None): [0],
self.mon('B')(b=1) % self.mon('A')(a=1): [100],
self.mon('B')(b=None): [0]}
initials_df = pd.DataFrame(initials_dict)
simres = self.sim.run(initials=initials_df)
assert np.allclose(simres.observables['AB_complex'][0], 100)
@raises(ValueError)
def test_y0_as_pandas_series(self):
self.sim.run(initials=pd.Series())
@raises(TypeError)
def test_y0_non_numeric_value(self):
"""Test y0 with non-numeric value."""
self.sim.run(initials={self.mon('A')(a=None): 'eggs'})
def test_param_values_as_dictionary(self):
"""Test param_values as a dictionary."""
simres = self.sim.run(param_values={'kbindAB': 0})
# kbindAB=0 should ensure no AB_complex is produced.
assert np.allclose(simres.observables["AB_complex"], 0)
def test_param_values_as_dataframe(self):
simres = self.sim.run(param_values=pd.DataFrame({'kbindAB': [0]}))
assert np.allclose(simres.observables['AB_complex'], 0)
@raises(ValueError)
def test_param_values_as_pandas_series(self):
self.sim.run(param_values=pd.Series())
def test_param_values_as_list_ndarray(self):
"""Test param_values as a list and ndarray."""
orig_param_values = self.sim.param_values
param_values = [50, 60, 70, 0, 0]
self.sim.param_values = param_values
simres = self.sim.run()
assert np.allclose(self.sim.param_values, param_values)
assert np.allclose(simres.param_values, param_values)
# Reset to original param values
self.sim.param_values = orig_param_values
# Same thing, but with a numpy array, applied as a run argument
param_values = np.asarray([55, 65, 75, 0, 0])
simres = self.sim.run(param_values=param_values)
assert np.allclose(simres.param_values, param_values)
# param_values should reset to originals after the run
assert np.allclose(self.sim.param_values, orig_param_values)
@raises(IndexError)
def test_param_values_invalid_dictionary_key(self):
"""Test param_values with invalid parameter name."""
self.sim.run(param_values={'spam': 150})
@raises(ValueError, TypeError)
def test_param_values_non_numeric_value(self):
"""Test param_values with non-numeric value."""
self.sim.run(param_values={'ksynthA': 'eggs'})
def test_result_dataframe(self):
df = self.sim.run().dataframe
class TestScipyOdeCompilerTests(TestScipySimulatorBase):
"""Test vode and analytic jacobian with different compiler backends"""
def setUp(self):
super(TestScipyOdeCompilerTests, self).setUp()
self.args = {'model': self.model,
'tspan': self.time,
'integrator': 'vode',
'use_analytic_jacobian': True}
self.python_sim = ScipyOdeSimulator(compiler='python', **self.args)
self.python_res = self.python_sim.run()
def test_cython(self):
sim = ScipyOdeSimulator(compiler='cython', **self.args)
simres = sim.run()
assert simres.species.shape[0] == self.args['tspan'].shape[0]
assert np.allclose(self.python_res.dataframe, simres.dataframe)
class TestScipySimulatorSequential(TestScipySimulatorBase):
def test_sequential_initials(self):
simres = self.sim.run()
orig_initials = self.sim.initials
new_initials = [10, 20, 30]
simres = self.sim.run(initials=new_initials)
# Check that single-run initials applied properly to the result
assert np.allclose(simres.species[0], new_initials)
assert np.allclose(simres.initials, new_initials)
# Check that the single-run initials were removed after the run
assert np.allclose(self.sim.initials, orig_initials)
def test_sequential_initials_dict_then_list(self):
A, B = self.model.monomers
base_sim = ScipyOdeSimulator(
self.model,
initials={A(a=None): 10, B(b=None): 20})
assert np.allclose(base_sim.initials, [10, 20, 0])
assert len(base_sim.initials_dict) == 2
# Now set initials using a list, which should overwrite the dict
base_sim.initials = [30, 40, 50]
assert np.allclose(base_sim.initials, [30, 40, 50])
assert np.allclose(
sorted([x[0] for x in base_sim.initials_dict.values()]),
base_sim.initials)
def test_sequential_param_values(self):
orig_param_values = self.sim.param_values
new_param_values = {'kbindAB': 0}
new_initials = [15, 25, 35]
simres = self.sim.run(param_values=new_param_values,
initials=new_initials)
# No new AB_complex should be formed
assert np.allclose(simres.observables['AB_complex'], new_initials[2])
assert simres.nsims == 1
# Original param_values should be restored after run
assert np.allclose(self.sim.param_values, orig_param_values)
# Check that per-run param override works when a base param
# dictionary is also specified
self.sim.param_values = new_param_values
base_param_values = new_param_values
new_param_values = {'ksynthB': 50}
simres = self.sim.run(param_values=new_param_values)
# Check that new param value override applied
assert np.allclose(simres.param_values[0][1],
new_param_values['ksynthB'])
# Check that simulator reverts to base param values
assert np.allclose(self.sim.param_values[0][2],
base_param_values['kbindAB'])
# Reset to original param values
self.sim.param_values = orig_param_values
def test_sequential_tspan(self):
tspan = np.linspace(0, 10, 11)
orig_tspan = self.sim.tspan
simres = self.sim.run(tspan=tspan)
# Check that new tspan applied properly
assert np.allclose(simres.tout, tspan)
# Check that simulator instance reset to original tspan
assert np.allclose(self.sim.tspan, orig_tspan)
class TestScipySimulatorMultiple(TestScipySimulatorBase):
def test_initials_and_param_values_two_lists(self):
initials = [[10, 20, 30], [50, 60, 70]]
param_values = [[55, 65, 75, 0, 0],
[90, 100, 110, 5, 6]]
import pysb.bng
pysb.bng.generate_equations(self.sim.model)
simres = self.sim.run(initials=initials, param_values=param_values)
assert np.allclose(simres.species[0][0], initials[0])
assert np.allclose(simres.species[1][0], initials[1])
assert np.allclose(simres.param_values[0], param_values[0])
assert np.allclose(simres.param_values[1], param_values[1])
assert simres.nsims == 2
# Check the methods underlying these properties work
df = simres.dataframe
all = simres.all
# Try overriding above lists of initials/params with dicts
self.sim.initials = initials
self.sim.param_values = param_values
simres = self.sim.run(
initials={self.mon('A')(a=None): [103, 104]},
param_values={'ksynthA': [101, 102]})
# Simulator initials and params should not persist run() overrides
assert np.allclose(self.sim.initials, initials)
assert np.allclose(self.sim.param_values, param_values)
# Create the expected initials/params arrays and compare to result
initials = np.array(initials)
initials[:, 0] = [103, 104]
param_values = np.array(param_values)
param_values[:, 0] = [101, 102]
assert np.allclose(simres.initials, initials)
assert np.allclose(simres.param_values, param_values)
@raises(ValueError)
def test_run_initials_different_length_to_base(self):
initials = [[10, 20, 30, 40], [50, 60, 70, 80]]
self.sim.initials = initials
self.sim.run(initials=initials[0])
@raises(ValueError)
def test_run_params_different_length_to_base(self):
param_values = [[55, 65, 75, 0, 0, 1],
[90, 100, 110, 5, 6, 7]]
self.sim.param_values = param_values
self.sim.run(param_values=param_values[0])
@raises(InconsistentParameterError)
def test_run_params_inconsistent_parameter_list(self):
param_values = [55, 65, 75, 0, -3]
self.sim.param_values = param_values
self.sim.run(param_values=param_values[0])
@raises(InconsistentParameterError)
def test_run_params_inconsistent_parameter_dict(self):
param_values = {'A_init': [0, -4]}
self.sim.param_values = param_values
self.sim.run(param_values=param_values[0])
def test_param_values_dict(self):
param_values = {'A_init': [0, 100]}
initials = {self.model.monomers['B'](b=None): [250, 350]}
simres = self.sim.run(param_values=param_values)
assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'A_free'],
[0, 100])
simres = self.sim.run(param_values={'B_init': [200, 300]})
assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'A_free'],
[0, 0])
assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'B_free'],
[200, 300])
simres = self.sim.run(initials=initials, param_values=param_values)
assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'A_free'],
[0, 100])
assert np.allclose(simres.dataframe.loc[(slice(None), 0.0), 'B_free'],
[250, 350])
@raises(ValueError)
def test_initials_and_param_values_differing_lengths(self):
initials = [[10, 20, 30, 40], [50, 60, 70, 80]]
param_values = [[55, 65, 75, 0, 0],
[90, 100, 110, 5, 6],
[90, 100, 110, 5, 6]]
self.sim.run(initials=initials, param_values=param_values)
@unittest.skipIf(sys.version_info.major < 3,
'Parallel execution requires Python >= 3.3')
def test_parallel(self):
for integrator in ('vode', 'lsoda'):
for use_analytic_jacobian in (True, False):
yield self._check_parallel, integrator, use_analytic_jacobian
def _check_parallel(self, integrator, use_analytic_jacobian):
initials = [[10, 20, 30], [50, 60, 70]]
sim = ScipyOdeSimulator(
self.model, self.sim.tspan,
initials=initials,
integrator=integrator,
use_analytic_jacobian=use_analytic_jacobian
)
base_res = sim.run(initials=initials)
res = sim.run(initials=initials, num_processors=2)
assert np.allclose(res.species, base_res.species)
@with_model
def test_integrate_with_expression():
"""Ensure a model with Expressions simulates."""
Monomer('s1')
Monomer('s9')
Monomer('s16')
Monomer('s20')
# Parameters should be able to contain s(\d+) without error
Parameter('ks0',2e-5)
Parameter('ka20', 1e5)
Initial(s9(), Parameter('s9_0', 10000))
Observable('s1_obs', s1())
Observable('s9_obs', s9())
Observable('s16_obs', s16())
Observable('s20_obs', s20())
Expression('keff', (ks0*ka20)/(ka20+s9_obs))
Rule('R1', None >> s16(), ks0)
Rule('R2', None >> s20(), ks0)
Rule('R3', s16() + s20() >> s16() + s1(), keff)
time = np.linspace(0, 40)
sim = ScipyOdeSimulator(model, tspan=time)
simres = sim.run()
keff_vals = simres.expressions['keff']
assert len(keff_vals) == len(time)
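    # s9 appears in no rules, so s9_obs stays at its initial value of 10000 and
    # keff is constant: keff = ks0 * ka20 / (ka20 + s9_obs)
    #                        = 2e-5 * 1e5 / (1e5 + 1e4) = 2 / 1.1e5 ~ 1.818e-5,
    # which is the value asserted below.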
assert np.allclose(keff_vals, 1.8181818181818182e-05)
def test_set_initial_to_zero():
sim = ScipyOdeSimulator(robertson.model, tspan=np.linspace(0, 100))
simres = sim.run(initials={robertson.model.monomers['A'](): 0})
assert np.allclose(simres.observables['A_total'], 0)
def test_robertson_integration():
"""Ensure robertson model simulates."""
t = np.linspace(0, 100)
sim = ScipyOdeSimulator(robertson.model, tspan=t, compiler="python")
simres = sim.run()
assert simres.species.shape[0] == t.shape[0]
# Also run with cython compiler if available.
if CythonRhsBuilder.check_safe():
sim = ScipyOdeSimulator(robertson.model, tspan=t, compiler="cython")
simres = sim.run()
assert simres.species.shape[0] == t.shape[0]
def test_earm_integration():
"""Ensure earm_1_0 model simulates."""
t = np.linspace(0, 1e3)
sim = ScipyOdeSimulator(earm_1_0.model, tspan=t, compiler="python")
sim.run()
# Also run with cython compiler if available.
if CythonRhsBuilder.check_safe():
ScipyOdeSimulator(earm_1_0.model, tspan=t, compiler="cython").run()
@raises(ValueError)
def test_simulation_no_tspan():
ScipyOdeSimulator(robertson.model).run()
@raises(UserWarning)
def test_nonexistent_integrator():
"""Ensure nonexistent integrator raises."""
ScipyOdeSimulator(robertson.model, tspan=np.linspace(0, 1, 2),
integrator='does_not_exist')
def test_unicode_obsname_ascii():
"""Ensure ascii-convetible unicode observable names are handled."""
t = np.linspace(0, 100)
rob_copy = copy.deepcopy(robertson.model)
rob_copy.observables[0].name = u'A_total'
sim = ScipyOdeSimulator(rob_copy)
simres = sim.run(tspan=t)
simres.all
simres.dataframe
if sys.version_info[0] < 3:
@raises(ValueError)
def test_unicode_obsname_nonascii():
"""Ensure non-ascii unicode observable names error in python 2."""
t = np.linspace(0, 100)
rob_copy = copy.deepcopy(robertson.model)
rob_copy.observables[0].name = u'A_total\u1234'
sim = ScipyOdeSimulator(rob_copy)
simres = sim.run(tspan=t)
def test_unicode_exprname_ascii():
"""Ensure ascii-convetible unicode expression names are handled."""
t = np.linspace(0, 100)
rob_copy = copy.deepcopy(robertson.model)
ab = rob_copy.observables['A_total'] + rob_copy.observables['B_total']
expr = Expression(u'A_plus_B', ab, _export=False)
rob_copy.add_component(expr)
sim = ScipyOdeSimulator(rob_copy)
simres = sim.run(tspan=t)
simres.all
simres.dataframe
if sys.version_info[0] < 3:
@raises(ValueError)
def test_unicode_exprname_nonascii():
"""Ensure non-ascii unicode expression names error in python 2."""
t = np.linspace(0, 100)
rob_copy = copy.deepcopy(robertson.model)
ab = rob_copy.observables['A_total'] + rob_copy.observables['B_total']
expr = Expression(u'A_plus_B\u1234', ab, _export=False)
rob_copy.add_component(expr)
sim = ScipyOdeSimulator(rob_copy)
simres = sim.run(tspan=t)
def test_multiprocessing_lambdify():
model = tyson_oscillator.model
pars = [p.value for p in model.parameters]
tspan = np.linspace(0, 100, 100)
ScipyOdeSimulator(
model, tspan=tspan, compiler='python',
use_analytic_jacobian=True
).run(param_values=[pars, pars], num_processors=2)
def test_lambdify_localfunc():
model = localfunc.model
ScipyOdeSimulator(model, tspan=range(100), compiler='python').run()
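# Illustrative sketch (not part of the pysb test suite): running several
# simulations in one call by passing a list of parameter sets, mirroring
# TestScipySimulatorMultiple above. The function name is hypothetical and not
# prefixed with "test_" so that test runners skip it.
def _example_multi_simulation_run():
    tspan = np.linspace(0, 100)
    sim = ScipyOdeSimulator(robertson.model, tspan=tspan, compiler='python')
    # Two identical parameter sets, taken from the model's nominal values.
    pars = [p.value for p in robertson.model.parameters]
    simres = sim.run(param_values=[pars, pars])
    assert simres.nsims == 2
    return simres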
|
bsd-2-clause
|
gengliangwang/spark
|
python/pyspark/pandas/internal.py
|
1
|
57972
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An internal immutable DataFrame with some metadata to manage indexes.
"""
import re
from typing import Dict, List, Optional, Sequence, Tuple, Union, TYPE_CHECKING, cast
from itertools import accumulate
import py4j
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype, is_datetime64_dtype, is_datetime64tz_dtype
from pyspark import sql as spark
from pyspark._globals import _NoValue, _NoValueType
from pyspark.sql import functions as F, Window
from pyspark.sql.functions import PandasUDFType, pandas_udf
from pyspark.sql.types import BooleanType, DataType, StructField, StructType, LongType
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.config import get_option
from pyspark.pandas.typedef import (
Dtype,
as_spark_type,
extension_dtypes,
infer_pd_series_spark_type,
spark_type_to_pandas_dtype,
)
from pyspark.pandas.utils import (
column_labels_level,
default_session,
is_name_like_tuple,
is_testing,
lazy_property,
name_like_string,
scol_for,
spark_column_equals,
verify_temp_column_name,
)
# A function to turn given numbers to Spark columns that represent pandas-on-Spark index.
SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format
SPARK_DEFAULT_INDEX_NAME = SPARK_INDEX_NAME_FORMAT(0)
# A pattern to check if the name of a Spark column is a pandas-on-Spark index name or not.
SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__")
NATURAL_ORDER_COLUMN_NAME = "__natural_order__"
HIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME}
DEFAULT_SERIES_NAME = 0
SPARK_DEFAULT_SERIES_NAME = str(DEFAULT_SERIES_NAME)
class InternalFrame(object):
"""
The internal immutable DataFrame which manages Spark DataFrame and column names and index
information.
    .. note:: this is an internal class. It is not supposed to be exposed to users and users
        should not access it directly.
    The internal immutable DataFrame represents the index information for the DataFrame it belongs to.
    For instance, if we have a pandas-on-Spark DataFrame as below, the pandas DataFrame does not
    store the index as columns.
>>> psdf = ps.DataFrame({
... 'A': [1, 2, 3, 4],
... 'B': [5, 6, 7, 8],
... 'C': [9, 10, 11, 12],
... 'D': [13, 14, 15, 16],
... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E'])
>>> psdf # doctest: +NORMALIZE_WHITESPACE
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
However, all columns including index column are also stored in Spark DataFrame internally
as below.
>>> psdf._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
    In order to fill this gap, the current metadata is used by mapping Spark's internal columns
    to pandas-on-Spark's index. See the attributes below:
* `spark_frame` represents the internal Spark DataFrame
* `data_spark_column_names` represents non-indexing Spark column names
* `data_spark_columns` represents non-indexing Spark columns
* `data_dtypes` represents external non-indexing dtypes
* `index_spark_column_names` represents internal index Spark column names
* `index_spark_columns` represents internal index Spark columns
* `index_dtypes` represents external index dtypes
* `spark_column_names` represents all columns
* `index_names` represents the external index name as a label
* `to_internal_spark_frame` represents Spark DataFrame derived by the metadata. Includes index.
* `to_pandas_frame` represents pandas DataFrame derived by the metadata
>>> internal = psdf._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None]
>>> internal.data_dtypes
[dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64')]
>>> internal.index_dtypes
[dtype('int64')]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
In case that index is set to one of the existing column as below:
>>> psdf1 = psdf.set_index("A")
>>> psdf1 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
>>> psdf1._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal = psdf1._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[('A',)]
>>> internal.data_dtypes
[dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64')]
>>> internal.index_dtypes
[dtype('int64')]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
In case that index becomes a multi index as below:
>>> psdf2 = psdf.set_index("A", append=True)
>>> psdf2 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
>>> psdf2._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal = psdf2._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__', 'A']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None, ('A',)]
>>> internal.data_dtypes
[dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64')]
>>> internal.index_dtypes
[dtype('int64'), dtype('int64')]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
For multi-level columns, it also holds column_labels
>>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'),
... ('Y', 'C'), ('Y', 'D')])
>>> psdf3 = ps.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16],
... [17, 18, 19, 20]], columns = columns)
>>> psdf3 # doctest: +NORMALIZE_WHITESPACE
X Y
A B C D
0 1 2 3 4
1 5 6 7 8
2 9 10 11 12
3 13 14 15 16
4 17 18 19 20
>>> internal = psdf3._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+------+------+------+------+-----------------+
|__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)|__natural_order__|
+-----------------+------+------+------+------+-----------------+
| 0| 1| 2| 3| 4| ...|
| 1| 5| 6| 7| 8| ...|
| 2| 9| 10| 11| 12| ...|
| 3| 13| 14| 15| 16| ...|
| 4| 17| 18| 19| 20| ...|
+-----------------+------+------+------+------+-----------------+
>>> internal.data_spark_column_names
['(X, A)', '(X, B)', '(Y, C)', '(Y, D)']
>>> internal.column_labels
[('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]
For Series, it also holds scol to represent the column.
>>> psseries = psdf1.B
>>> psseries
A
1 5
2 6
3 7
4 8
Name: B, dtype: int64
>>> internal = psseries._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B']
>>> internal.index_names
[('A',)]
>>> internal.data_dtypes
[dtype('int64')]
>>> internal.index_dtypes
[dtype('int64')]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+
| A| B|
+---+---+
| 1| 5|
| 2| 6|
| 3| 7|
| 4| 8|
+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B
A
1 5
2 6
3 7
4 8
"""
def __init__(
self,
spark_frame: spark.DataFrame,
index_spark_columns: Optional[List[spark.Column]],
index_names: Optional[List[Optional[Tuple]]] = None,
index_dtypes: Optional[List[Dtype]] = None,
column_labels: Optional[List[Tuple]] = None,
data_spark_columns: Optional[List[spark.Column]] = None,
data_dtypes: Optional[List[Dtype]] = None,
column_label_names: Optional[List[Optional[Tuple]]] = None,
):
"""
Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and
index fields and names.
:param spark_frame: Spark DataFrame to be managed.
:param index_spark_columns: list of Spark Column
Spark Columns for the index.
:param index_names: list of tuples
the index names.
:param index_dtypes: list of dtypes
the index dtypes.
        :param column_labels: list of tuples with the same length as `data_spark_columns`
            The multi-level values in the tuples.
:param data_spark_columns: list of Spark Column
Spark Columns to appear as columns. If this is None, calculated
from spark_frame.
:param data_dtypes: list of dtypes.
the data dtypes.
:param column_label_names: Names for each of the column index levels.
        See the examples below for what each parameter means.
>>> column_labels = pd.MultiIndex.from_tuples(
... [('a', 'x'), ('a', 'y'), ('b', 'z')], names=["column_labels_a", "column_labels_b"])
>>> row_index = pd.MultiIndex.from_tuples(
... [('foo', 'bar'), ('foo', 'bar'), ('zoo', 'bar')],
... names=["row_index_a", "row_index_b"])
>>> psdf = ps.DataFrame(
... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=row_index, columns=column_labels)
>>> psdf.set_index(('a', 'x'), append=True, inplace=True)
>>> psdf # doctest: +NORMALIZE_WHITESPACE
column_labels_a a b
column_labels_b y z
row_index_a row_index_b (a, x)
foo bar 1 2 3
4 5 6
zoo bar 7 8 9
>>> internal = psdf._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+-----------------+------+------+------+...
|__index_level_0__|__index_level_1__|(a, x)|(a, y)|(b, z)|...
+-----------------+-----------------+------+------+------+...
| foo| bar| 1| 2| 3|...
| foo| bar| 4| 5| 6|...
| zoo| bar| 7| 8| 9|...
+-----------------+-----------------+------+------+------+...
>>> internal.index_spark_columns # doctest: +SKIP
[Column<'__index_level_0__'>, Column<'__index_level_1__'>, Column<'(a, x)'>]
>>> internal.index_names
[('row_index_a',), ('row_index_b',), ('a', 'x')]
>>> internal.index_dtypes
[dtype('O'), dtype('O'), dtype('int64')]
>>> internal.column_labels
[('a', 'y'), ('b', 'z')]
>>> internal.data_spark_columns # doctest: +SKIP
[Column<'(a, y)'>, Column<'(b, z)'>]
>>> internal.data_dtypes
[dtype('int64'), dtype('int64')]
>>> internal.column_label_names
[('column_labels_a',), ('column_labels_b',)]
"""
assert isinstance(spark_frame, spark.DataFrame)
assert not spark_frame.isStreaming, "pandas-on-Spark does not support Structured Streaming."
if not index_spark_columns:
if data_spark_columns is not None:
if column_labels is not None:
data_spark_columns = [
scol.alias(name_like_string(label))
for scol, label in zip(data_spark_columns, column_labels)
]
spark_frame = spark_frame.select(data_spark_columns)
assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in spark_frame.columns), (
"Index columns should not appear in columns of the Spark DataFrame. Avoid "
"index column names [%s]." % SPARK_INDEX_NAME_PATTERN
)
# Create default index.
spark_frame = InternalFrame.attach_default_index(spark_frame)
index_spark_columns = [scol_for(spark_frame, SPARK_DEFAULT_INDEX_NAME)]
if data_spark_columns is not None:
data_spark_columns = [
scol_for(spark_frame, col)
for col in spark_frame.columns
if col != SPARK_DEFAULT_INDEX_NAME
]
if NATURAL_ORDER_COLUMN_NAME not in spark_frame.columns:
spark_frame = spark_frame.withColumn(
NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id()
)
self._sdf = spark_frame # type: spark.DataFrame
# index_spark_columns
assert all(
isinstance(index_scol, spark.Column) for index_scol in index_spark_columns
), index_spark_columns
self._index_spark_columns = index_spark_columns # type: List[spark.Column]
# index_names
if not index_names:
index_names = [None] * len(index_spark_columns)
assert len(index_spark_columns) == len(index_names), (
len(index_spark_columns),
len(index_names),
)
assert all(
is_name_like_tuple(index_name, check_type=True) for index_name in index_names
), index_names
self._index_names = index_names # type: List[Optional[Tuple]]
# index_dtypes
if not index_dtypes:
index_dtypes = [None] * len(index_spark_columns)
assert len(index_spark_columns) == len(index_dtypes), (
len(index_spark_columns),
len(index_dtypes),
)
index_dtypes = [
spark_type_to_pandas_dtype(spark_frame.select(scol).schema[0].dataType)
if dtype is None or dtype == np.dtype("object")
else dtype
for dtype, scol in zip(index_dtypes, index_spark_columns)
]
assert all(
isinstance(dtype, Dtype.__args__) # type: ignore
and (dtype == np.dtype("object") or as_spark_type(dtype, raise_error=False) is not None)
for dtype in index_dtypes
), index_dtypes
self._index_dtypes = index_dtypes # type: List[Dtype]
# data_spark-columns
if data_spark_columns is None:
data_spark_columns = [
scol_for(spark_frame, col)
for col in spark_frame.columns
if all(
not spark_column_equals(scol_for(spark_frame, col), index_scol)
for index_scol in index_spark_columns
)
and col not in HIDDEN_COLUMNS
]
self._data_spark_columns = data_spark_columns # type: List[spark.Column]
else:
assert all(isinstance(scol, spark.Column) for scol in data_spark_columns)
self._data_spark_columns = data_spark_columns
# column_labels
if column_labels is None:
self._column_labels = [
(col,) for col in spark_frame.select(self._data_spark_columns).columns
] # type: List[Tuple]
else:
assert len(column_labels) == len(self._data_spark_columns), (
len(column_labels),
len(self._data_spark_columns),
)
if len(column_labels) == 1:
column_label = column_labels[0]
assert is_name_like_tuple(column_label, check_type=True), column_label
else:
assert all(
is_name_like_tuple(column_label, check_type=True)
for column_label in column_labels
), column_labels
assert len(set(len(label) for label in column_labels)) <= 1, column_labels
self._column_labels = column_labels
# data_dtypes
if not data_dtypes:
data_dtypes = [None] * len(data_spark_columns)
assert len(data_spark_columns) == len(data_dtypes), (
len(data_spark_columns),
len(data_dtypes),
)
data_dtypes = [
spark_type_to_pandas_dtype(spark_frame.select(scol).schema[0].dataType)
if dtype is None or dtype == np.dtype("object")
else dtype
for dtype, scol in zip(data_dtypes, data_spark_columns)
]
assert all(
isinstance(dtype, Dtype.__args__) # type: ignore
and (dtype == np.dtype("object") or as_spark_type(dtype, raise_error=False) is not None)
for dtype in data_dtypes
), data_dtypes
self._data_dtypes = data_dtypes # type: List[Dtype]
# column_label_names
if column_label_names is None:
self._column_label_names = [None] * column_labels_level(
self._column_labels
) # type: List[Optional[Tuple]]
else:
if len(self._column_labels) > 0:
assert len(column_label_names) == column_labels_level(self._column_labels), (
len(column_label_names),
column_labels_level(self._column_labels),
)
else:
assert len(column_label_names) > 0, len(column_label_names)
assert all(
is_name_like_tuple(column_label_name, check_type=True)
for column_label_name in column_label_names
), column_label_names
self._column_label_names = column_label_names
@staticmethod
def attach_default_index(
sdf: spark.DataFrame, default_index_type: Optional[str] = None
) -> spark.DataFrame:
"""
        This method attaches a default index to a Spark DataFrame. Spark does not have the notion
        of an index, so a corresponding column has to be generated.
        There are several types of default index that can be configured by `compute.default_index_type`.
>>> spark_frame = ps.range(10).to_spark()
>>> spark_frame
DataFrame[id: bigint]
It adds the default index column '__index_level_0__'.
>>> spark_frame = InternalFrame.attach_default_index(spark_frame)
>>> spark_frame
DataFrame[__index_level_0__: bigint, id: bigint]
It throws an exception if the given column name already exists.
>>> InternalFrame.attach_default_index(spark_frame)
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: '__index_level_0__' already exists...
"""
index_column = SPARK_DEFAULT_INDEX_NAME
assert (
index_column not in sdf.columns
), "'%s' already exists in the Spark column names '%s'" % (index_column, sdf.columns)
if default_index_type is None:
default_index_type = get_option("compute.default_index_type")
if default_index_type == "sequence":
return InternalFrame.attach_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed-sequence":
return InternalFrame.attach_distributed_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed":
return InternalFrame.attach_distributed_column(sdf, column_name=index_column)
else:
raise ValueError(
"'compute.default_index_type' should be one of 'sequence',"
" 'distributed-sequence' and 'distributed'"
)
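    # Note (illustrative, not taken from the pandas-on-Spark docs): the default
    # index type above is controlled by the option "compute.default_index_type",
    # e.g. ps.set_option("compute.default_index_type", "distributed-sequence").
    # "sequence" yields a consecutive 0-based index through a single-partition
    # window, "distributed-sequence" computes the same consecutive index in a
    # distributed way, and "distributed" uses monotonically_increasing_id(),
    # which is unique but not consecutive.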
@staticmethod
def attach_sequence_column(sdf: spark.DataFrame, column_name: str) -> spark.DataFrame:
scols = [scol_for(sdf, column) for column in sdf.columns]
sequential_index = (
F.row_number().over(Window.orderBy(F.monotonically_increasing_id())).cast("long") - 1
)
return sdf.select(sequential_index.alias(column_name), *scols)
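    # Note: the window above has no partitioning, so Spark evaluates
    # row_number() in a single partition; "sequence" therefore gives a
    # consecutive 0-based index at the cost of parallelism, which is why the
    # distributed variants below exist.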
@staticmethod
def attach_distributed_column(sdf: spark.DataFrame, column_name: str) -> spark.DataFrame:
scols = [scol_for(sdf, column) for column in sdf.columns]
return sdf.select(F.monotonically_increasing_id().alias(column_name), *scols)
@staticmethod
def attach_distributed_sequence_column(
sdf: spark.DataFrame, column_name: str
) -> spark.DataFrame:
"""
This method attaches a Spark column that has a sequence in a distributed manner.
        This is equivalent to the column assigned when the default index type is 'distributed-sequence'.
>>> sdf = ps.DataFrame(['a', 'b', 'c']).to_spark()
>>> sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name="sequence")
>>> sdf.show() # doctest: +NORMALIZE_WHITESPACE
+--------+---+
|sequence| 0|
+--------+---+
| 0| a|
| 1| b|
| 2| c|
+--------+---+
"""
if len(sdf.columns) > 0:
try:
jdf = sdf._jdf.toDF() # type: ignore
sql_ctx = sdf.sql_ctx
encoders = sql_ctx._jvm.org.apache.spark.sql.Encoders # type: ignore
encoder = encoders.tuple(jdf.exprEnc(), encoders.scalaLong())
jrdd = jdf.localCheckpoint(False).rdd().zipWithIndex()
df = spark.DataFrame(
sql_ctx.sparkSession._jsparkSession.createDataset( # type: ignore
jrdd, encoder
).toDF(),
sql_ctx,
)
columns = df.columns
return df.selectExpr(
"`{}` as `{}`".format(columns[1], column_name), "`{}`.*".format(columns[0])
)
except py4j.protocol.Py4JError:
if is_testing():
raise
return InternalFrame._attach_distributed_sequence_column(sdf, column_name)
else:
cnt = sdf.count()
if cnt > 0:
return default_session().range(cnt).toDF(column_name)
else:
return default_session().createDataFrame(
[], schema=StructType().add(column_name, data_type=LongType(), nullable=False)
)
@staticmethod
def _attach_distributed_sequence_column(
sdf: spark.DataFrame, column_name: str
) -> spark.DataFrame:
"""
>>> sdf = ps.DataFrame(['a', 'b', 'c']).to_spark()
>>> sdf = InternalFrame._attach_distributed_sequence_column(sdf, column_name="sequence")
>>> sdf.sort("sequence").show() # doctest: +NORMALIZE_WHITESPACE
+--------+---+
|sequence| 0|
+--------+---+
| 0| a|
| 1| b|
| 2| c|
+--------+---+
"""
scols = [scol_for(sdf, column) for column in sdf.columns]
spark_partition_column = verify_temp_column_name(sdf, "__spark_partition_id__")
offset_column = verify_temp_column_name(sdf, "__offset__")
row_number_column = verify_temp_column_name(sdf, "__row_number__")
# 1. Calculates counts per each partition ID. `counts` here is, for instance,
# {
# 1: 83,
# 6: 83,
# 3: 83,
# ...
# }
sdf = sdf.withColumn(spark_partition_column, F.spark_partition_id())
# Checkpoint the DataFrame to fix the partition ID.
sdf = sdf.localCheckpoint(eager=False)
counts = map(
lambda x: (x["key"], x["count"]),
sdf.groupby(sdf[spark_partition_column].alias("key")).count().collect(),
)
# 2. Calculates cumulative sum in an order of partition id.
# Note that it does not matter if partition id guarantees its order or not.
# We just need a one-by-one sequential id.
# sort by partition key.
sorted_counts = sorted(counts, key=lambda x: x[0])
# get cumulative sum in an order of partition key.
cumulative_counts = [0] + list(accumulate(map(lambda count: count[1], sorted_counts)))
# zip it with partition key.
sums = dict(zip(map(lambda count: count[0], sorted_counts), cumulative_counts))
# 3. Attach offset for each partition.
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def offset(id: pd.Series) -> pd.Series:
current_partition_offset = sums[id.iloc[0]]
return pd.Series(current_partition_offset).repeat(len(id))
sdf = sdf.withColumn(offset_column, offset(spark_partition_column))
# 4. Calculate row_number in each partition.
w = Window.partitionBy(spark_partition_column).orderBy(F.monotonically_increasing_id())
row_number = F.row_number().over(w)
sdf = sdf.withColumn(row_number_column, row_number)
# 5. Calculate the index.
return sdf.select(
(sdf[offset_column] + sdf[row_number_column] - 1).alias(column_name), *scols
)
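    # Illustrative sketch (not pandas-on-Spark API): the cumulative-sum step of
    # _attach_distributed_sequence_column, reproduced in plain Python. Given
    # per-partition row counts, each partition's offset is the total number of
    # rows in all partitions with a smaller partition id, e.g.
    # {1: 83, 3: 83, 6: 83} -> {1: 0, 3: 83, 6: 166}. The method name is
    # hypothetical and is not used elsewhere in this class.
    @staticmethod
    def _example_partition_offsets(counts: Dict[int, int]) -> Dict[int, int]:
        sorted_counts = sorted(counts.items())
        cumulative = [0] + list(accumulate(count for _, count in sorted_counts))
        return {key: offset for (key, _), offset in zip(sorted_counts, cumulative)}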
def spark_column_for(self, label: Tuple) -> spark.Column:
""" Return Spark Column for the given column label. """
column_labels_to_scol = dict(zip(self.column_labels, self.data_spark_columns))
if label in column_labels_to_scol:
return column_labels_to_scol[label]
else:
raise KeyError(name_like_string(label))
def spark_column_name_for(self, label_or_scol: Union[Tuple, spark.Column]) -> str:
""" Return the actual Spark column name for the given column label. """
if isinstance(label_or_scol, spark.Column):
scol = label_or_scol
else:
scol = self.spark_column_for(label_or_scol)
return self.spark_frame.select(scol).columns[0]
def spark_type_for(self, label_or_scol: Union[Tuple, spark.Column]) -> DataType:
""" Return DataType for the given column label. """
if isinstance(label_or_scol, spark.Column):
scol = label_or_scol
else:
scol = self.spark_column_for(label_or_scol)
return self.spark_frame.select(scol).schema[0].dataType
def spark_column_nullable_for(self, label_or_scol: Union[Tuple, spark.Column]) -> bool:
""" Return nullability for the given column label. """
if isinstance(label_or_scol, spark.Column):
scol = label_or_scol
else:
scol = self.spark_column_for(label_or_scol)
return self.spark_frame.select(scol).schema[0].nullable
def dtype_for(self, label: Tuple) -> Dtype:
""" Return dtype for the given column label. """
column_labels_to_dtype = dict(zip(self.column_labels, self.data_dtypes))
if label in column_labels_to_dtype:
return column_labels_to_dtype[label]
else:
raise KeyError(name_like_string(label))
@property
def spark_frame(self) -> spark.DataFrame:
""" Return the managed Spark DataFrame. """
return self._sdf
@lazy_property
def data_spark_column_names(self) -> List[str]:
""" Return the managed column field names. """
return self.spark_frame.select(self.data_spark_columns).columns
@property
def data_spark_columns(self) -> List[spark.Column]:
""" Return Spark Columns for the managed data columns. """
return self._data_spark_columns
@property
def index_spark_column_names(self) -> List[str]:
""" Return the managed index field names. """
return self.spark_frame.select(self.index_spark_columns).columns
@property
def index_spark_columns(self) -> List[spark.Column]:
""" Return Spark Columns for the managed index columns. """
return self._index_spark_columns
@lazy_property
def spark_column_names(self) -> List[str]:
""" Return all the field names including index field names. """
return self.spark_frame.select(self.spark_columns).columns
@lazy_property
def spark_columns(self) -> List[spark.Column]:
""" Return Spark Columns for the managed columns including index columns. """
index_spark_columns = self.index_spark_columns
return index_spark_columns + [
spark_column
for spark_column in self.data_spark_columns
if all(
not spark_column_equals(spark_column, scol)
for scol in index_spark_columns
)
]
@property
def index_names(self) -> List[Optional[Tuple]]:
""" Return the managed index names. """
return self._index_names
@lazy_property
def index_level(self) -> int:
""" Return the level of the index. """
return len(self._index_names)
@property
def column_labels(self) -> List[Tuple]:
""" Return the managed column index. """
return self._column_labels
@lazy_property
def column_labels_level(self) -> int:
""" Return the level of the column index. """
return len(self._column_label_names)
@property
def column_label_names(self) -> List[Optional[Tuple]]:
""" Return names of the index levels. """
return self._column_label_names
@property
def index_dtypes(self) -> List[Dtype]:
""" Return dtypes for the managed index columns. """
return self._index_dtypes
@property
def data_dtypes(self) -> List[Dtype]:
""" Return dtypes for the managed columns. """
return self._data_dtypes
@lazy_property
def to_internal_spark_frame(self) -> spark.DataFrame:
"""
        Return as a Spark DataFrame. This contains the index columns as well
        and should only be used for internal purposes.
"""
index_spark_columns = self.index_spark_columns
data_columns = []
for spark_column in self.data_spark_columns:
if all(
not spark_column_equals(spark_column, scol)
for scol in index_spark_columns
):
data_columns.append(spark_column)
return self.spark_frame.select(index_spark_columns + data_columns)
@lazy_property
def to_pandas_frame(self) -> pd.DataFrame:
""" Return as pandas DataFrame. """
sdf = self.to_internal_spark_frame
pdf = sdf.toPandas()
if len(pdf) == 0 and len(sdf.schema) > 0:
pdf = pdf.astype(
{field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}
)
return InternalFrame.restore_index(pdf, **self.arguments_for_restore_index)
@lazy_property
def arguments_for_restore_index(self) -> Dict:
""" Create arguments for `restore_index`. """
column_names = []
ext_dtypes = {
col: dtype
for col, dtype in zip(self.index_spark_column_names, self.index_dtypes)
if isinstance(dtype, extension_dtypes)
}
categorical_dtypes = {
col: dtype
for col, dtype in zip(self.index_spark_column_names, self.index_dtypes)
if isinstance(dtype, CategoricalDtype)
}
for spark_column, column_name, dtype in zip(
self.data_spark_columns, self.data_spark_column_names, self.data_dtypes
):
for index_spark_column_name, index_spark_column in zip(
self.index_spark_column_names, self.index_spark_columns
):
if spark_column_equals(spark_column, index_spark_column):
column_names.append(index_spark_column_name)
break
else:
column_names.append(column_name)
if isinstance(dtype, extension_dtypes):
ext_dtypes[column_name] = dtype
elif isinstance(dtype, CategoricalDtype):
categorical_dtypes[column_name] = dtype
return dict(
index_columns=self.index_spark_column_names,
index_names=self.index_names,
data_columns=column_names,
column_labels=self.column_labels,
column_label_names=self.column_label_names,
ext_dtypes=ext_dtypes,
categorical_dtypes=categorical_dtypes,
)
@staticmethod
def restore_index(
pdf: pd.DataFrame,
*,
index_columns: List[str],
index_names: List[Tuple],
data_columns: List[str],
column_labels: List[Tuple],
column_label_names: List[Tuple],
ext_dtypes: Dict[str, Dtype] = None,
categorical_dtypes: Dict[str, CategoricalDtype] = None
) -> pd.DataFrame:
"""
Restore pandas DataFrame indices using the metadata.
:param pdf: the pandas DataFrame to be processed.
:param index_columns: the original column names for index columns.
:param index_names: the index names after restored.
:param data_columns: the original column names for data columns.
:param column_labels: the column labels after restored.
:param column_label_names: the column label names after restored.
:param ext_dtypes: the map from the original column names to extension data types.
:param categorical_dtypes: the map from the original column names to categorical types.
:return: the restored pandas DataFrame
>>> pdf = pd.DataFrame({"index": [10, 20, 30], "a": ['a', 'b', 'c'], "b": [0, 2, 1]})
>>> InternalFrame.restore_index(
... pdf,
... index_columns=["index"],
... index_names=[("idx",)],
... data_columns=["a", "b", "index"],
... column_labels=[("x",), ("y",), ("z",)],
... column_label_names=[("lv1",)],
... ext_dtypes=None,
... categorical_dtypes={"b": CategoricalDtype(categories=["i", "j", "k"])}
... ) # doctest: +NORMALIZE_WHITESPACE
lv1 x y z
idx
10 a i 10
20 b k 20
30 c j 30
"""
if ext_dtypes is not None and len(ext_dtypes) > 0:
pdf = pdf.astype(ext_dtypes, copy=True)
if categorical_dtypes is not None:
for col, dtype in categorical_dtypes.items():
pdf[col] = pd.Categorical.from_codes(
pdf[col], categories=dtype.categories, ordered=dtype.ordered
)
append = False
for index_field in index_columns:
drop = index_field not in data_columns
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[data_columns]
pdf.index.names = [
name if name is None or len(name) > 1 else name[0] for name in index_names
]
names = [name if name is None or len(name) > 1 else name[0] for name in column_label_names]
if len(column_label_names) > 1:
pdf.columns = pd.MultiIndex.from_tuples(column_labels, names=names)
else:
pdf.columns = pd.Index(
[None if label is None else label[0] for label in column_labels], name=names[0],
)
return pdf
@lazy_property
def resolved_copy(self) -> "InternalFrame":
""" Copy the immutable InternalFrame with the updates resolved. """
sdf = self.spark_frame.select(self.spark_columns + list(HIDDEN_COLUMNS))
return self.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
data_spark_columns=[scol_for(sdf, col) for col in self.data_spark_column_names],
)
def with_new_sdf(
self,
spark_frame: spark.DataFrame,
*,
index_dtypes: Optional[List[Dtype]] = None,
data_columns: Optional[List[str]] = None,
data_dtypes: Optional[List[Dtype]] = None
) -> "InternalFrame":
""" Copy the immutable InternalFrame with the updates by the specified Spark DataFrame.
:param spark_frame: the new Spark DataFrame
        :param index_dtypes: the index dtypes. If None, the original dtypes are used.
        :param data_columns: the new column names. If None, the original ones are used.
        :param data_dtypes: the data dtypes. If None, the original dtypes are used.
:return: the copied InternalFrame.
"""
if index_dtypes is None:
index_dtypes = self.index_dtypes
else:
assert len(index_dtypes) == len(self.index_dtypes), (
len(index_dtypes),
len(self.index_dtypes),
)
if data_columns is None:
data_columns = self.data_spark_column_names
else:
assert len(data_columns) == len(self.column_labels), (
len(data_columns),
len(self.column_labels),
)
if data_dtypes is None:
data_dtypes = self.data_dtypes
else:
assert len(data_dtypes) == len(self.column_labels), (
len(data_dtypes),
len(self.column_labels),
)
sdf = spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
return self.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
index_dtypes=index_dtypes,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_dtypes=data_dtypes,
)
def with_new_columns(
self,
scols_or_pssers: Sequence[Union[spark.Column, "Series"]],
*,
column_labels: Optional[List[Tuple]] = None,
data_dtypes: Optional[List[Dtype]] = None,
column_label_names: Union[Optional[List[Optional[Tuple]]], _NoValueType] = _NoValue,
keep_order: bool = True
) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the updates by the specified Spark Columns or Series.
:param scols_or_pssers: the new Spark Columns or Series.
:param column_labels: the new column index.
            If None, the column_labels of the corresponding `scols_or_pssers` are used if they are
            Series; otherwise the original ones are used.
:param data_dtypes: the new dtypes.
            If None, the dtypes of the corresponding `scols_or_pssers` are used if they are Series;
otherwise the dtypes will be inferred from the corresponding `scols_or_pssers`.
:param column_label_names: the new names of the column index levels.
:return: the copied InternalFrame.
"""
from pyspark.pandas.series import Series
if column_labels is None:
if all(isinstance(scol_or_psser, Series) for scol_or_psser in scols_or_pssers):
column_labels = [cast(Series, psser)._column_label for psser in scols_or_pssers]
else:
assert len(scols_or_pssers) == len(self.column_labels), (
len(scols_or_pssers),
len(self.column_labels),
)
column_labels = []
for scol_or_psser, label in zip(scols_or_pssers, self.column_labels):
if isinstance(scol_or_psser, Series):
column_labels.append(scol_or_psser._column_label)
else:
column_labels.append(label)
else:
assert len(scols_or_pssers) == len(column_labels), (
len(scols_or_pssers),
len(column_labels),
)
data_spark_columns = []
for scol_or_psser in scols_or_pssers:
if isinstance(scol_or_psser, Series):
scol = scol_or_psser.spark.column
else:
scol = scol_or_psser
data_spark_columns.append(scol)
if data_dtypes is None:
data_dtypes = []
for scol_or_psser in scols_or_pssers:
if isinstance(scol_or_psser, Series):
data_dtypes.append(scol_or_psser.dtype)
else:
data_dtypes.append(None)
else:
assert len(scols_or_pssers) == len(data_dtypes), (
len(scols_or_pssers),
len(data_dtypes),
)
sdf = self.spark_frame
if not keep_order:
sdf = self.spark_frame.select(self.index_spark_columns + data_spark_columns)
index_spark_columns = [scol_for(sdf, col) for col in self.index_spark_column_names]
data_spark_columns = [
scol_for(sdf, col) for col in self.spark_frame.select(data_spark_columns).columns
]
else:
index_spark_columns = self.index_spark_columns
if column_label_names is _NoValue:
column_label_names = self._column_label_names
return self.copy(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
column_labels=column_labels,
data_spark_columns=data_spark_columns,
data_dtypes=data_dtypes,
column_label_names=column_label_names,
)
def with_filter(self, pred: Union[spark.Column, "Series"]) -> "InternalFrame":
""" Copy the immutable InternalFrame with the updates by the predicate.
:param pred: the predicate to filter.
:return: the copied InternalFrame.
"""
from pyspark.pandas.series import Series
if isinstance(pred, Series):
assert isinstance(pred.spark.data_type, BooleanType), pred.spark.data_type
condition = pred.spark.column
else:
spark_type = self.spark_frame.select(pred).schema[0].dataType
assert isinstance(spark_type, BooleanType), spark_type
condition = pred
return self.with_new_sdf(self.spark_frame.filter(condition).select(self.spark_columns))
def with_new_spark_column(
self,
column_label: Tuple,
scol: spark.Column,
*,
dtype: Optional[Dtype] = None,
keep_order: bool = True
) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the updates by the specified Spark Column.
:param column_label: the column label to be updated.
:param scol: the new Spark Column
:param dtype: the new dtype.
            If not specified, the dtype will be inferred from the Spark Column.
:return: the copied InternalFrame.
"""
assert column_label in self.column_labels, column_label
idx = self.column_labels.index(column_label)
data_spark_columns = self.data_spark_columns.copy()
data_spark_columns[idx] = scol
data_dtypes = self.data_dtypes.copy()
data_dtypes[idx] = dtype
return self.with_new_columns(
data_spark_columns, data_dtypes=data_dtypes, keep_order=keep_order
)
def select_column(self, column_label: Tuple) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the specified column.
:param column_label: the column label to use.
:return: the copied InternalFrame.
"""
assert column_label in self.column_labels, column_label
return self.copy(
column_labels=[column_label],
data_spark_columns=[self.spark_column_for(column_label)],
data_dtypes=[self.dtype_for(column_label)],
column_label_names=None,
)
def copy(
self,
*,
spark_frame: Union[spark.DataFrame, _NoValueType] = _NoValue,
index_spark_columns: Union[List[spark.Column], _NoValueType] = _NoValue,
index_names: Union[Optional[List[Optional[Tuple]]], _NoValueType] = _NoValue,
index_dtypes: Union[Optional[List[Dtype]], _NoValueType] = _NoValue,
column_labels: Union[Optional[List[Tuple]], _NoValueType] = _NoValue,
data_spark_columns: Union[Optional[List[spark.Column]], _NoValueType] = _NoValue,
data_dtypes: Union[Optional[List[Dtype]], _NoValueType] = _NoValue,
column_label_names: Union[Optional[List[Optional[Tuple]]], _NoValueType] = _NoValue
) -> "InternalFrame":
""" Copy the immutable InternalFrame.
:param spark_frame: the new Spark DataFrame. If not specified, the original one is used.
:param index_spark_columns: the list of Spark Column.
If not specified, the original ones are used.
:param index_names: the index names. If not specified, the original ones are used.
        :param index_dtypes: the index dtypes. If not specified, the original dtypes are used.
:param column_labels: the new column labels. If not specified, the original ones are used.
:param data_spark_columns: the new Spark Columns.
If not specified, the original ones are used.
        :param data_dtypes: the data dtypes. If not specified, the original dtypes are used.
:param column_label_names: the new names of the column index levels.
If not specified, the original ones are used.
:return: the copied immutable InternalFrame.
"""
if spark_frame is _NoValue:
spark_frame = self.spark_frame
if index_spark_columns is _NoValue:
index_spark_columns = self.index_spark_columns
if index_names is _NoValue:
index_names = self.index_names
if index_dtypes is _NoValue:
index_dtypes = self.index_dtypes
if column_labels is _NoValue:
column_labels = self.column_labels
if data_spark_columns is _NoValue:
data_spark_columns = self.data_spark_columns
if data_dtypes is _NoValue:
data_dtypes = self.data_dtypes
if column_label_names is _NoValue:
column_label_names = self.column_label_names
return InternalFrame(
spark_frame=cast(spark.DataFrame, spark_frame),
index_spark_columns=cast(List[spark.Column], index_spark_columns),
index_names=cast(Optional[List[Optional[Tuple]]], index_names),
index_dtypes=cast(Optional[List[Dtype]], index_dtypes),
column_labels=cast(Optional[List[Tuple]], column_labels),
data_spark_columns=cast(Optional[List[spark.Column]], data_spark_columns),
data_dtypes=cast(Optional[List[Dtype]], data_dtypes),
column_label_names=cast(Optional[List[Optional[Tuple]]], column_label_names),
)
@staticmethod
def from_pandas(pdf: pd.DataFrame) -> "InternalFrame":
""" Create an immutable DataFrame from pandas DataFrame.
:param pdf: :class:`pd.DataFrame`
:return: the created immutable DataFrame
"""
index_names = [
name if name is None or isinstance(name, tuple) else (name,) for name in pdf.index.names
]
columns = pdf.columns
if isinstance(columns, pd.MultiIndex):
column_labels = columns.tolist()
else:
column_labels = [(col,) for col in columns]
column_label_names = [
name if name is None or isinstance(name, tuple) else (name,) for name in columns.names
]
(
pdf,
index_columns,
index_dtypes,
data_columns,
data_dtypes,
) = InternalFrame.prepare_pandas_frame(pdf)
schema = StructType(
[
StructField(
name, infer_pd_series_spark_type(col, dtype), nullable=bool(col.isnull().any()),
)
for (name, col), dtype in zip(pdf.iteritems(), index_dtypes + data_dtypes)
]
)
sdf = default_session().createDataFrame(pdf, schema=schema)
return InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_columns],
index_names=index_names,
index_dtypes=index_dtypes,
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_dtypes=data_dtypes,
column_label_names=column_label_names,
)
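# A hedged usage sketch (not part of the original module): the usual round
# trip is to build an InternalFrame from pandas and then derive further
# immutable frames via copy(); a running default SparkSession is assumed.
#
#   import pandas as pd
#   internal = InternalFrame.from_pandas(pd.DataFrame({"a": [1, 2, 3]}))
#   renamed = internal.copy(column_labels=[("b",)])  # `internal` itself is unchanged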
@staticmethod
def prepare_pandas_frame(
pdf: pd.DataFrame, *, retain_index: bool = True
) -> Tuple[pd.DataFrame, List[str], List[Dtype], List[str], List[Dtype]]:
"""
Prepare pandas DataFrame for creating Spark DataFrame.
:param pdf: the pandas DataFrame to be prepared.
:param retain_index: whether the indices should be retained.
:return: the tuple of
- the prepared pandas dataFrame
- index column names for Spark DataFrame
- index dtypes of the given pandas DataFrame
- data column names for Spark DataFrame
- data dtypes of the given pandas DataFrame
>>> pdf = pd.DataFrame(
... {("x", "a"): ['a', 'b', 'c'],
... ("y", "b"): pd.Categorical(["i", "k", "j"], categories=["i", "j", "k"])},
... index=[10, 20, 30])
>>> prepared, index_columns, index_dtypes, data_columns, data_dtypes = (
... InternalFrame.prepare_pandas_frame(pdf))
>>> prepared
__index_level_0__ (x, a) (y, b)
0 10 a 0
1 20 b 2
2 30 c 1
>>> index_columns
['__index_level_0__']
>>> index_dtypes
[dtype('int64')]
>>> data_columns
['(x, a)', '(y, b)']
>>> data_dtypes
[dtype('O'), CategoricalDtype(categories=['i', 'j', 'k'], ordered=False)]
"""
pdf = pdf.copy()
data_columns = [name_like_string(col) for col in pdf.columns]
pdf.columns = data_columns
if retain_index:
index_nlevels = pdf.index.nlevels
index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(index_nlevels)]
pdf.index.names = index_columns
reset_index = pdf.reset_index()
else:
index_nlevels = 0
index_columns = []
reset_index = pdf
index_dtypes = list(reset_index.dtypes)[:index_nlevels]
data_dtypes = list(reset_index.dtypes)[index_nlevels:]
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
elif isinstance(dt, CategoricalDtype):
col = col.cat.codes
reset_index[name] = col.replace({np.nan: None})
return reset_index, index_columns, index_dtypes, data_columns, data_dtypes
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.internal
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.internal.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.internal tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.internal,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
spennihana/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_mojo_predict.py
|
1
|
10966
|
import sys
import tempfile
import shutil
import time
import os
import pandas
sys.path.insert(1, "../../")
import h2o
import h2o.utils.shared_utils as h2o_utils
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
genmodel_name = "h2o-genmodel.jar"
def download_mojo(model, mojo_zip_path, genmodel_path=None):
mojo_zip_path = os.path.abspath(mojo_zip_path)
parent_dir = os.path.dirname(mojo_zip_path)
print("\nDownloading MOJO @... " + parent_dir)
time0 = time.time()
if genmodel_path is None:
genmodel_path = os.path.join(parent_dir, genmodel_name)
mojo_file = model.download_mojo(path=mojo_zip_path, get_genmodel_jar=True, genmodel_name=genmodel_path)
print(" => %s (%d bytes)" % (mojo_file, os.stat(mojo_file).st_size))
assert os.path.exists(mojo_file)
print(" Time taken = %.3fs" % (time.time() - time0))
assert os.path.exists(mojo_zip_path)
print(" => %s (%d bytes)" % (mojo_zip_path, os.stat(mojo_zip_path).st_size))
assert os.path.exists(genmodel_path)
print(" => %s (%d bytes)" % (genmodel_path, os.stat(genmodel_path).st_size))
def mojo_predict_api_test(sandbox_dir):
data = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
input_csv = "%s/in.csv" % sandbox_dir
output_csv = "%s/prediction.csv" % sandbox_dir
h2o.export_file(data[1, 2:], input_csv)
data[1] = data[1].asfactor()
model = H2OGradientBoostingEstimator(distribution="bernoulli")
model.train(x=[2, 3, 4, 5, 6, 7, 8], y=1, training_frame=data)
# download mojo
model_zip_path = os.path.join(sandbox_dir, 'model.zip')
genmodel_path = os.path.join(sandbox_dir, 'h2o-genmodel.jar')
download_mojo(model, model_zip_path)
assert os.path.isfile(model_zip_path)
assert os.path.isfile(genmodel_path)
# test that we can predict using default paths
h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path, verbose=True)
h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path, genmodel_jar_path=genmodel_path,
verbose=True)
assert os.path.isfile(output_csv)
os.remove(model_zip_path)
os.remove(genmodel_path)
os.remove(output_csv)
# test that we can predict using custom genmodel path
other_sandbox_dir = tempfile.mkdtemp()
try:
genmodel_path = os.path.join(other_sandbox_dir, 'h2o-genmodel-custom.jar')
download_mojo(model, model_zip_path, genmodel_path)
assert os.path.isfile(model_zip_path)
assert os.path.isfile(genmodel_path)
try:
h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path, verbose=True)
assert False, "There should be no h2o-genmodel.jar at %s" % sandbox_dir
except RuntimeError:
pass
assert not os.path.isfile(output_csv)
h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path,
genmodel_jar_path=genmodel_path, verbose=True)
assert os.path.isfile(output_csv)
os.remove(output_csv)
output_csv = "%s/out.prediction" % other_sandbox_dir
# test that we can predict to a custom output path
h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=model_zip_path,
genmodel_jar_path=genmodel_path, verbose=True, output_csv_path=output_csv)
assert os.path.isfile(output_csv)
os.remove(model_zip_path)
os.remove(genmodel_path)
os.remove(output_csv)
finally:
shutil.rmtree(other_sandbox_dir)
def mojo_predict_csv_test(target_dir):
mojo_file_name = "prostate_gbm_model.zip"
mojo_zip_path = os.path.join(target_dir, mojo_file_name)
prostate = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
r = prostate[0].runif()
train = prostate[r < 0.70]
test = prostate[r >= 0.70]
# Getting first row from test data frame
pdf = test[1, 2:]
input_csv = "%s/in.csv" % target_dir
output_csv = "%s/output.csv" % target_dir
h2o.export_file(pdf, input_csv)
# =================================================================
# Regression
# =================================================================
regression_gbm1 = H2OGradientBoostingEstimator(distribution="gaussian")
regression_gbm1.train(x=[2, 3, 4, 5, 6, 7, 8], y=1, training_frame=train)
pred_reg = regression_gbm1.predict(pdf)
p1 = pred_reg[0, 0]
print("Regression prediction: " + str(p1))
download_mojo(regression_gbm1, mojo_zip_path)
print("\nPerforming Regression Prediction using MOJO @... " + target_dir)
prediction_result = h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=mojo_zip_path,
output_csv_path=output_csv)
print("Prediction result: " + str(prediction_result))
assert p1 == float(prediction_result[0]['predict']), "expected predictions to be the same for binary and MOJO model for regression"
# =================================================================
# Binomial
# =================================================================
train[1] = train[1].asfactor()
bernoulli_gbm1 = H2OGradientBoostingEstimator(distribution="bernoulli")
bernoulli_gbm1.train(x=[2, 3, 4, 5, 6, 7, 8], y=1, training_frame=train)
pred_bin = bernoulli_gbm1.predict(pdf)
binary_prediction_0 = pred_bin[0, 1]
binary_prediction_1 = pred_bin[0, 2]
print("Binomial prediction: p0: " + str(binary_prediction_0))
print("Binomial prediction: p1: " + str(binary_prediction_1))
download_mojo(bernoulli_gbm1, mojo_zip_path)
print("\nPerforming Binomial Prediction using MOJO @... " + target_dir)
prediction_result = h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=mojo_zip_path,
output_csv_path=output_csv)
mojo_prediction_0 = float(prediction_result[0]['0'])
mojo_prediction_1 = float(prediction_result[0]['1'])
print("Binomial prediction: p0: " + str(mojo_prediction_0))
print("Binomial prediction: p1: " + str(mojo_prediction_1))
assert binary_prediction_0 == mojo_prediction_0, "expected predictions to be the same for binary and MOJO model for Binomial - p0"
assert binary_prediction_1 == mojo_prediction_1, "expected predictions to be the same for binary and MOJO model for Binomial - p1"
# =================================================================
# Multinomial
# =================================================================
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
r = iris[0].runif()
train = iris[r < 0.90]
test = iris[r >= 0.10]
# Getting first row from test data frame
pdf = test[1, 0:4]
input_csv = "%s/in-multi.csv" % target_dir
output_csv = "%s/output.csv" % target_dir
h2o.export_file(pdf, input_csv)
multi_gbm = H2OGradientBoostingEstimator()
multi_gbm.train(x=['C1', 'C2', 'C3', 'C4'], y='C5', training_frame=train)
pred_multi = multi_gbm.predict(pdf)
multinomial_prediction_1 = pred_multi[0, 1]
multinomial_prediction_2 = pred_multi[0, 2]
multinomial_prediction_3 = pred_multi[0, 3]
print("Multinomial prediction (Binary): p0: " + str(multinomial_prediction_1))
print("Multinomial prediction (Binary): p1: " + str(multinomial_prediction_2))
print("Multinomial prediction (Binary): p2: " + str(multinomial_prediction_3))
download_mojo(multi_gbm, mojo_zip_path)
print("\nPerforming Multinomial Prediction using MOJO @... " + target_dir)
prediction_result = h2o_utils.mojo_predict_csv(input_csv_path=input_csv, mojo_zip_path=mojo_zip_path,
output_csv_path=output_csv)
mojo_prediction_1 = float(prediction_result[0]['Iris-setosa'])
mojo_prediction_2 = float(prediction_result[0]['Iris-versicolor'])
mojo_prediction_3 = float(prediction_result[0]['Iris-virginica'])
print("Multinomial prediction (MOJO): p0: " + str(mojo_prediction_1))
print("Multinomial prediction (MOJO): p1: " + str(mojo_prediction_2))
print("Multinomial prediction (MOJO): p2: " + str(mojo_prediction_3))
assert multinomial_prediction_1 == mojo_prediction_1, "expected predictions to be the same for binary and MOJO model for Multinomial - p0"
assert multinomial_prediction_2 == mojo_prediction_2, "expected predictions to be the same for binary and MOJO model for Multinomial - p1"
assert multinomial_prediction_3 == mojo_prediction_3, "expected predictions to be the same for binary and MOJO model for Multinomial - p2"
def mojo_predict_pandas_test(sandbox_dir):
data = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
input_csv = "%s/in.csv" % sandbox_dir
pdf = data[1, 2:]
h2o.export_file(pdf, input_csv)
data[1] = data[1].asfactor()
model = H2OGradientBoostingEstimator(distribution="bernoulli")
model.train(x=[2, 3, 4, 5, 6, 7, 8], y=1, training_frame=data)
h2o_prediction = model.predict(pdf)
# download mojo
model_zip_path = os.path.join(sandbox_dir, 'model.zip')
genmodel_path = os.path.join(sandbox_dir, 'h2o-genmodel.jar')
download_mojo(model, model_zip_path)
assert os.path.isfile(model_zip_path)
assert os.path.isfile(genmodel_path)
pandas_frame = pandas.read_csv(input_csv)
mojo_prediction = h2o_utils.mojo_predict_pandas(dataframe=pandas_frame, mojo_zip_path=model_zip_path, genmodel_jar_path=genmodel_path)
print("Binomial Prediction (Binary) - p0: %f" % h2o_prediction[0,1])
print("Binomial Prediction (Binary) - p1: %f" % h2o_prediction[0,2])
print("Binomial Prediction (MOJO) - p0: %f" % mojo_prediction['0'].iloc[0])
print("Binomial Prediction (MOJO) - p1: %f" % mojo_prediction['1'].iloc[0])
assert h2o_prediction[0,1] == mojo_prediction['0'].iloc[0], "expected predictions to be the same for binary and MOJO model - p0"
assert h2o_prediction[0,2] == mojo_prediction['1'].iloc[0], "expected predictions to be the same for binary and MOJO model - p1"
csv_test_dir = tempfile.mkdtemp()
api_test_dir = tempfile.mkdtemp()
pandas_test_dir = tempfile.mkdtemp()
try:
if __name__ == "__main__":
pyunit_utils.standalone_test(lambda: mojo_predict_api_test(api_test_dir))
pyunit_utils.standalone_test(lambda: mojo_predict_csv_test(csv_test_dir))
pyunit_utils.standalone_test(lambda: mojo_predict_pandas_test(pandas_test_dir))
else:
mojo_predict_api_test(api_test_dir)
mojo_predict_csv_test(csv_test_dir)
mojo_predict_pandas_test(pandas_test_dir)
finally:
shutil.rmtree(csv_test_dir)
shutil.rmtree(api_test_dir)
shutil.rmtree(pandas_test_dir)
|
apache-2.0
|
zlpure/CS231n
|
assignment2/cs231n/features.py
|
30
|
4807
|
import matplotlib
import numpy as np
from scipy.ndimage import uniform_filter
def extract_features(imgs, feature_fns, verbose=False):
"""
Given pixel data for images and several feature functions that can operate on
single images, apply all feature functions to all images, concatenating the
feature vectors for each image and storing the features for all images in
a single matrix.
Inputs:
- imgs: N x H X W X C array of pixel data for N images.
- feature_fns: List of k feature functions. The ith feature function should
take as input an H x W x D array and return a (one-dimensional) array of
length F_i.
- verbose: Boolean; if true, print progress.
Returns:
An array of shape (N, F_1 + ... + F_k) where each row is the concatenation
of all features for a single image.
"""
num_images = imgs.shape[0]
if num_images == 0:
return np.array([])
# Use the first image to determine feature dimensions
feature_dims = []
first_image_features = []
for feature_fn in feature_fns:
feats = feature_fn(imgs[0].squeeze())
assert len(feats.shape) == 1, 'Feature functions must be one-dimensional'
feature_dims.append(feats.size)
first_image_features.append(feats)
# Now that we know the dimensions of the features, we can allocate a single
# big array to store all features as columns.
total_feature_dim = sum(feature_dims)
imgs_features = np.zeros((num_images, total_feature_dim))
imgs_features[0] = np.hstack(first_image_features).T
# Extract features for the rest of the images.
for i in xrange(1, num_images):
idx = 0
for feature_fn, feature_dim in zip(feature_fns, feature_dims):
next_idx = idx + feature_dim
imgs_features[i, idx:next_idx] = feature_fn(imgs[i].squeeze())
idx = next_idx
if verbose and i % 1000 == 0:
print 'Done extracting features for %d / %d images' % (i, num_images)
return imgs_features
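# A hedged usage sketch (not part of the original file; array shapes are
# illustrative only): combine the two feature functions defined below on a
# small random batch of images.
#
#   imgs = 255 * np.random.rand(10, 32, 32, 3)       # N x H x W x C
#   feats = extract_features(imgs, [hog_feature, color_histogram_hsv])
#   # feats.shape == (10, 154) with the default HOG cells and 10 histogram bins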
def rgb2gray(rgb):
"""Convert RGB image to grayscale
Parameters:
rgb : RGB image
Returns:
gray : grayscale image
"""
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def hog_feature(im):
"""Compute Histogram of Gradient (HOG) feature for an image
Modified from skimage.feature.hog
http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog
Reference:
Histograms of Oriented Gradients for Human Detection
Navneet Dalal and Bill Triggs, CVPR 2005
Parameters:
im : an input grayscale or rgb image
Returns:
feat: Histogram of Gradient (HOG) feature
"""
# convert rgb to grayscale if needed
if im.ndim == 3:
image = rgb2gray(im)
else:
image = np.atleast_2d(im)
sx, sy = image.shape # image size
orientations = 9 # number of gradient bins
cx, cy = (8, 8) # pixels per cell
gx = np.zeros(image.shape)
gy = np.zeros(image.shape)
gx[:, :-1] = np.diff(image, n=1, axis=1) # compute gradient on x-direction
gy[:-1, :] = np.diff(image, n=1, axis=0) # compute gradient on y-direction
grad_mag = np.sqrt(gx ** 2 + gy ** 2) # gradient magnitude
grad_ori = np.arctan2(gy, (gx + 1e-15)) * (180 / np.pi) + 90 # gradient orientation
n_cellsx = int(np.floor(sx / cx)) # number of cells in x
n_cellsy = int(np.floor(sy / cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
for i in range(orientations):
# create new integral image for this orientation
# isolate orientations in this range
temp_ori = np.where(grad_ori < 180 / orientations * (i + 1),
grad_ori, 0)
temp_ori = np.where(grad_ori >= 180 / orientations * i,
temp_ori, 0)
# select magnitudes for those orientations
cond2 = temp_ori > 0
temp_mag = np.where(cond2, grad_mag, 0)
orientation_histogram[:,:,i] = uniform_filter(temp_mag, size=(cx, cy))[cx/2::cx, cy/2::cy].T
return orientation_histogram.ravel()
def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):
"""
Compute color histogram for an image using hue.
Inputs:
- im: H x W x C array of pixel data for an RGB image.
- nbin: Number of histogram bins. (default: 10)
- xmin: Minimum pixel value (default: 0)
- xmax: Maximum pixel value (default: 255)
- normalized: Whether to normalize the histogram (default: True)
Returns:
1D vector of length nbin giving the color histogram over the hue of the
input image.
"""
ndim = im.ndim
bins = np.linspace(xmin, xmax, nbin+1)
hsv = matplotlib.colors.rgb_to_hsv(im/xmax) * xmax
imhist, bin_edges = np.histogram(hsv[:,:,0], bins=bins, density=normalized)
imhist = imhist * np.diff(bin_edges)
# return histogram
return imhist
pass
|
mit
|
dpshelio/sunpy
|
sunpy/visualization/animator/line.py
|
2
|
7232
|
import numpy as np
from sunpy.visualization.animator.base import ArrayAnimator, edges_to_centers_nd
__all__ = ['LineAnimator']
class LineAnimator(ArrayAnimator):
"""
Create a matplotlib backend independent data explorer for 1D plots.
The following keyboard shortcuts are defined in the viewer:
* 'left': previous step on active slider.
* 'right': next step on active slider.
* 'top': change the active slider up one.
* 'bottom': change the active slider down one.
* 'p': play/pause active slider.
This viewer can have user defined buttons added by specifying the labels
and functions called when those buttons are clicked as keyword arguments.
Parameters
----------
data: `numpy.ndarray`
The y-axis to be visualized.
plot_axis_index: `int`, optional
The axis used to plot against ``data``.
Defaults to ``-1``, i.e., the last dimension of the array.
axis_ranges: `list` of physical coordinates for the `numpy.ndarray`, optional
Defaults to `None` and array indices will be used for all axes.
The `list` should contain one element for each axis of the `numpy.ndarray`.
For the image axes a ``[min, max]`` pair should be specified which will be
passed to `matplotlib.pyplot.imshow` as an extent.
For the slider axes a ``[min, max]`` pair can be specified or an array the
same length as the axis which will provide all values for that slider.
For more information, see the Notes section of this docstring.
xlabel: `str`, optional
Label of x-axis. Defaults to `None`.
ylabel: `str`, optional
Label of y-axis. Defaults to `None`.
xlim: `tuple`, optional
Limits of x-axis of plot. Defaults to `None`.
ylim: `tuple`, optional
Limits of y-axis of plot. Defaults to `None`.
Notes
-----
Additional information on the API of the ``axis_ranges`` keyword argument.
#. x-axis values must be supplied (if desired) as an array in the element of
the ``axis_ranges`` `list` corresponding to the ``plot_axis_index`` in the data array,
i.e., ``x_axis_values == axis_ranges[plot_axis_index]``
#. The x-axis values represent the edges of the pixels/bins along the plotted
axis, not the centers. Therefore there must be 1 more x-axis value than
there are data points along the x-axis.
#. The shape of the x-axis values array can take two forms.
a) First, it can have a length 1 greater than the length of the data array
along the dimension corresponding to the x-axis, i.e.,
``len(axis_ranges[plot_axis_index]) == len(data[plot_axis_index])+1``.
In this scenario the same x-axis values are used in every frame of the animation.
b) Second, the x-axis array can have the same shape as the data array, with
the exception of the plotted axis which, as above, must be 1 greater than
the length of the data array along that dimension.
In this scenario the x-axis is refreshed for each frame. For example, if
``data.shape == axis_ranges[plot_axis_index].shape == (4, 3)``,
where ``plot_axis_index == 0``, the 0th frame of the animation will show data from
``data[:, 0]`` with the x-axis described by ``axis_ranges[plot_axis_index][:, 0]``,
while the 1st frame will show data from ``data[:, 1]`` with the x-axis described by
``axis_ranges[plot_axis_index][:, 1]``.
#. This API holds for slider axes.
Extra keywords are passed to `~sunpy.visualization.animator.ArrayAnimator`.
"""
def __init__(self, data, plot_axis_index=-1, axis_ranges=None, ylabel=None, xlabel=None,
xlim=None, ylim=None, aspect='auto', **kwargs):
# Check inputs.
self.plot_axis_index = int(plot_axis_index)
if self.plot_axis_index not in range(-data.ndim, data.ndim):
raise ValueError("plot_axis_index must be within range of number of data dimensions"
" (or equivalent negative indices).")
if data.ndim < 2:
raise ValueError("data must have at least two dimensions. One for data "
"for each single plot and at least one for time/iteration.")
# Define number of slider axes.
self.naxis = data.ndim
self.num_sliders = self.naxis-1
# Attach data to class.
if axis_ranges is not None and all(axis_range is None for axis_range in axis_ranges):
axis_ranges = None
if axis_ranges is None or axis_ranges[self.plot_axis_index] is None:
self.xdata = np.arange(data.shape[self.plot_axis_index])
else:
# Else derive the xdata as the centers of the pixel/bin edges
# supplied by the user for the plotted axis.
self.xdata = edges_to_centers_nd(np.asarray(axis_ranges[self.plot_axis_index]),
plot_axis_index)
if ylim is None:
ylim = (data.min(), data.max())
if xlim is None:
xlim = (self.xdata.min(), self.xdata.max())
self.ylim = ylim
self.xlim = xlim
self.xlabel = xlabel
self.ylabel = ylabel
self.aspect = aspect
# Run init for base class
super().__init__(data, image_axes=[self.plot_axis_index], axis_ranges=axis_ranges,
**kwargs)
def plot_start_image(self, ax):
"""
Sets up a plot of initial image.
"""
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
ax.set_aspect(self.aspect, adjustable = 'datalim')
if self.xlabel is not None:
ax.set_xlabel(self.xlabel)
if self.ylabel is not None:
ax.set_ylabel(self.ylabel)
plot_args = {}
plot_args.update(self.imshow_kwargs)
if self.xdata.shape == self.data.shape:
item = [0] * self.data.ndim
item[self.plot_axis_index] = slice(None)
xdata = np.squeeze(self.xdata[tuple(item)])
else:
xdata = self.xdata
line, = ax.plot(xdata, self.data[self.frame_index], **plot_args)
return line
def update_plot(self, val, line, slider):
"""
Updates plot based on slider/array dimension being iterated.
"""
val = int(val)
ax_ind = self.slider_axes[slider.slider_ind]
ind = int(np.argmin(np.abs(self.axis_ranges[ax_ind] - val)))
self.frame_slice[ax_ind] = ind
if val != slider.cval:
line.set_ydata(self.data[self.frame_index])
if self.xdata.shape == self.data.shape:
item = [int(slid._slider.val) for slid in self.sliders]
item[ax_ind] = val
if self.plot_axis_index < 0:
i = self.data.ndim + self.plot_axis_index
else:
i = self.plot_axis_index
item.insert(i, slice(None))
line.set_xdata(self.xdata[tuple(item)])
slider.cval = val
# Update slider label to reflect real world values in axis_ranges.
super().update_plot(val, line, slider)
|
bsd-2-clause
|
tomevans/planetc
|
planetc/transit.py
|
1
|
22091
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import pdb, sys, os
from . import keporb, ma02
from . import phys_consts as consts
# 15Jul2013 TME:
# Actually, I think I worked out that the 'bug' mentioned
# in the previous comment was an artefact of the equations,
# rather than a bug in the code. It might be to do with the
# fact that the equations implemented in the code are
# themselves an approximation that is very good for moderate
# eccentricities, but starts breaking down for higher
# eccentricity orbits.
# 3Mar2013 TME:
# There appears to be a bug in the code for calculating
# eccentric transits that manifests itself as an egress
# (haven't seen an ingress case yet) that isn't perfectly
# smooth - it looks basically right but it's definitely
# a bit funny. Need to fix this bug.
#
# Nov2012 TME:
# This module provides python wrappers for underlying
# C routines that compute Mandel & Agol (2002) transit
# lightcurves.
def ma02_aRs( t, **pars ):
"""
Uses the Mandel & Agol (2002) analytic equations to compute
transit lightcurves. This routine takes the semimajor axis
directly as an input parameter, and therefore does not require
either the star or planet mass; this differs from the
ma02_RsMsRpMp() routine which uses the star and planet masses
with Kepler's third law to calculate the semimajor axis.
CALLING
F = transit.ma02_aRs( t, **pars )
INPUTS
** t - a numpy array containing the times that the lightcurve
is to be evaluated for.
KEYWORD INPUTS
** pars - a dictionary containing the keyword arguments.
['tr_type'] a flag that can be set to either 'primary',
'secondary' or 'both'; if it is set to 'both', the
Z-coordinate will be calculated along with the normalised
separation in order to distinguish between primary transits
and secondary eclipses, and to scale the alternating flux
changes accordingly; on the other hand, if it is set to
either 'primary' or 'secondary', the Z-coordinate will not
be calculated and all transit events will have the same
shape and depth depending on which is chosen; the latter
saves time and should almost always be used when modelling
on a per-transit basis.
['T0'] time of periapse passage, which this routine will
force to be the same as the mid-transit time if the orbit
is circular, by setting the argument of periapse to 3pi/2.
['P'] orbital period in same units of time as 'T0'.
['aRs'] semimajor axis in units of stellar radii.
['RpRs'] planetary radius in units of stellar radii.
['incl'] orbital inclination in degrees.
['b'] = aRs*cos(i), which can be provided instead of 'incl'
['ecc'] orbital eccentricity.
['SecDepth'] depth of the secondary eclipse; will be set to
zero if not explicitly specified.
['omega'] argument of periapse in degrees; this must be
explicitly specified if the eccentricity is nonzero; if
the orbit is circular, it will be forced to 3pi/2 regardless
of the input value, to ensure that T0 corresponds to the
time of mid-transit.
['ld'] an optional flag that can be set to either None,
'quad' or 'nonlin' to specify the type of limb
darkening law to be used; if it is not set, then no limb
darkening will be used, and this would also be the case if
it is set to None.
['gam1']+['gam2'] quadratic limb darkening coeffs; required
if the 'ld' flag is set to 'quad'.
['c1']+['c2']+['c3']+['c4'] nonlinear limb darkening coeffs;
required if the 'ld' flag is set to 'nonlin'.
OUTPUT
** F - a numpy array containing the relative flux values of
the model transit lightcurve.
"""
# Start unpacking parameters:
T0 = pars[ 'T0' ]
P = pars[ 'P' ]
aRs = pars[ 'aRs' ]
RpRs = pars[ 'RpRs' ]
try:
incl_rad = np.deg2rad( pars['incl'] )
except:
try:
incl_rad = np.arccos( pars['b']/aRs )
except:
raise ValueError( 'Must provide at least one of incl or b' )
ecc = pars[ 'ecc' ]
try:
foot = pars[ 'foot' ]
except:
foot = 1.
try:
grad = pars[ 'grad' ]
except:
grad = 0.
try:
SecDepth = pars[ 'SecDepth' ]
except:
SecDepth = 0.
try:
tr_type = pars['tr_type']
except:
tr_type = 'both'
# Following the naming convention of the original
# Mandel & Agol (2002) paper for the limb darkening
# coefficients:
#print( pars['ld'] )
#pdb.set_trace()
try:
if pars['ld']=='quad':
gam1 = pars[ 'gam1' ]
gam2 = pars[ 'gam2' ]
elif pars['ld']=='nonlin':
c1 = pars[ 'c1' ]
c2 = pars[ 'c2' ]
c3 = pars[ 'c3' ]
c4 = pars[ 'c4' ]
elif pars['ld']==None:
pars['ld'] = 'quad'
pars['gam1'] = 0.
pars['gam2'] = 0.
except:
pars['ld'] = 'quad'
pars['gam1'] = 0.
pars['gam2'] = 0.
# Calculate the mean anomaly:
t = t.flatten()
MeanAnom = ( 2*np.pi/P )*( t - T0 )
# Calculate the normalised separation between the
# planetary and stellar discs:
if ecc != 0.:
omega_rad = pars['omega'] * np.pi/180.
NormSep = keporb.NormSep( MeanAnom, aRs, ecc, omega_rad, incl_rad )
else:
omega_rad = 3.*np.pi/2.
try:
b = pars['b']
except:
b = aRs*np.cos( incl_rad )
NormSep = np.sqrt( ( ( aRs*np.sin( MeanAnom ) )**2. ) \
+ ( ( b*np.cos( MeanAnom ) )**2. ) )
# If we want to model both primary transits and secondary
# eclipses, we need to compute the Z coordinate to determine
# when the planet is in front of the star (Z<0) and behind
# the star (Z>0):
if tr_type=='both':
F = np.ones( len( t ) )
zcoord = keporb.Zcoord( MeanAnom, aRs, ecc, omega_rad, incl_rad )
ixsf = ( zcoord < 0 )
if ixsf.max()==True:
if pars['ld']=='quad':
F[ixsf] = ma02.F_quad( NormSep[ixsf], RpRs, \
pars['gam1'], pars['gam2'] )
elif pars['ld']=='nonlin':
F[ixsf] = ma02.F_nonlin( NormSep[ixsf], RpRs, \
pars['c1'], pars['c2'], \
pars['c3'], pars['c4'] )
else:
pdb.set_trace()
ixsb = ( zcoord >= 0 )
if ixsb.max()==True:
temp = ma02.F_quad( NormSep[ixsb], RpRs, 0.0, 0.0 ) - 1.
F[ixsb] = 1 + temp*SecDepth/( temp.max() - temp.min() ) #/( RpRs**2. )
# If we're only interested in the primary transits then
# we must take stellar limb darkening into account
# while treating the planet as an non-luminous disc:
elif tr_type=='primary':
if pars['ld']=='quad':
F = ma02.F_quad( NormSep, RpRs, \
pars['gam1'], pars['gam2'] )
elif pars['ld']=='nonlin':
F = ma02.F_nonlin( NormSep, RpRs, \
pars['c1'], pars['c2'], \
pars['c3'], pars['c4'] )
else:
print( '\n\n\n{0:s} not recognised as limb darkening type'\
.format( pars['ld'] ) )
pdb.set_trace()
# If we're only interested in the secondary eclipses
# we treat the planet as a uniform disc with no limb
# darkening:
elif tr_type=='secondary':
temp = ma02.F_quad( NormSep, RpRs, 0.0, 0.0 ) - 1.
F = 1 + temp*SecDepth/( temp.max() - temp.min() ) #/( RpRs**2. )
# If requested, re-scale the lightcurve by a linear
# trend before returning the output:
if ( grad!=0. )+( foot!=1. ):
twid = t.max() - t.min()
tmid = t.min() + 0.5*twid
F = F * ( foot + grad*( t - tmid ) )
# NOTE: This will change the absolute value of the
# eclipse depth, but the fractional value of the
# eclipse depth will remain the same.
return F
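# A hedged usage sketch (not part of the original module; parameter values are
# purely illustrative):
#
#   t = np.linspace( -0.1, 0.1, 1000 )
#   pars = { 'T0':0.0, 'P':3.0, 'aRs':8.8, 'RpRs':0.1, 'incl':87.0,
#            'ecc':0.0, 'ld':'quad', 'gam1':0.4, 'gam2':0.2,
#            'tr_type':'primary' }
#   F = ma02_aRs( t, **pars )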
def ma02_RsMsRpMp( t, **pars ):
"""
Uses the Mandel & Agol (2002) analytic equations to compute
transit lightcurves. This routine takes the mass and radius
for both the star and planet as input parameters. The masses
are used with Kepler's third law to calculate the semimajor
axis. This differs from the ma02_aRs() routine which takes
the semimajor axis directly as an input parameter.
CALLING
F = transit.ma02_RsMsRpMp( t, **pars )
INPUTS
** t - a numpy array containing the times that the lightcurve
is to be evaluated for.
KEYWORD INPUTS
** pars - a dictionary containing the keyword arguments:
['tr_type'] a flag that can be set to either 'primary',
'secondary' or 'both'; if it is set to 'both', the
Z-coordinate will be calculated along with the normalised
separation in order to distinguish between primary transits
and secondary eclipses, and to scale the alternating flux
changes accordingly; on the other hand, if it is set to
either 'primary' or 'secondary', the Z-coordinate will not
be calculated and all transit events will have the same
shape and depth depending on which is chosen; the latter
saves time and should almost always be used when modelling
on a per-transit basis.
['T0'] time of periapse passage, which this routine will
force to be the same as the mid-transit time if the orbit
is circular, by setting the argument of periapse to 3pi/2.
['P'] orbital period in same units of time as 'T0'.
['Rs'] stellar radius in solar radii.
['Ms'] stellar mass in solar masses.
['Rp'] planetary radius in Jupiter radii.
['Mp'] planetary mass in Jupiter masses.
['incl'] orbital inclination in degrees.
['ecc'] orbital eccentricity.
['SecDepth'] depth of the secondary eclipse; will be set to
zero if not explicitly specified.
['omega'] argument of periapse in degrees; this must be
explicitly specified if the eccentricity is nonzero; if
the orbit is circular, it will be forced to 3pi/2 regardless
of the input value, to ensure that T0 corresponds to the
time of mid-transit.
['ld'] an optional flag that can be set to either None,
'quad' or 'nonlin' to specify the type of limb
darkening law to be used; if it is not set, then no limb
darkening will be used, and this would also be the case if
it is set to None.
['gam1']+['gam2'] quadratic limb darkening coeffs; required
if the 'ld' flag is set to 'quad'.
['c1']+['c2']+['c3']+['c4'] nonlinear limb darkening coeffs;
required if the 'ld' flag is set to 'nonlin'.
OUTPUT
** F - a numpy array containing the relative flux values of
the model transit lightcurve.
"""
# Start unpacking parameters:
T0 = pars['T0']
P = pars['P']
Rs = pars['Rs']
Ms = pars['Ms']
Rp = pars['Rp']
Mp = pars['Mp']
SecDepth = pars['SecDepth']
incl_rad = pars['incl'] * np.pi/180.
ecc = pars['ecc']
try:
foot = pars[ 'foot' ]
except:
foot = 1.
try:
grad = pars[ 'grad' ]
except:
grad = 0.
try:
SecDepth = pars[ 'SecDepth' ]
except:
SecDepth = 0.
try:
tr_type = pars['tr_type']
except:
tr_type = 'both'
# Convert some of the units:
Rs *= consts.RSun
Ms *= consts.MSun
Rp *= consts.RJup
Mp *= consts.MJup
# Assuming a 2-body Keplerian orbit, use Kepler's
# third law to calculate the semimajor axis:
a = np.power( ( ( ( ( P*24.*60.*60./( 2*np.pi ) )**2 ) \
* consts.G * ( Ms + Mp) ) ) , (1./3.) )
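# (The expression above is Kepler's third law rearranged as
#   a = [ G*(Ms + Mp) * ( P_seconds / (2*pi) )**2 ]**(1/3),
# with P converted from days to seconds; since Ms, Mp, Rs and Rp were
# converted to SI units just above, `a` comes out in metres.)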
aRs = a/Rs
RpRs = Rp/Rs
# Following the naming convention of the original
# Mandel & Agol (2002) paper for the limb darkening
# coefficients:
try:
if pars['ld']=='quad':
gam1 = pars[ 'gam1' ]
gam2 = pars[ 'gam2' ]
elif pars['ld']=='nonlin':
c1 = pars[ 'c1' ]
c2 = pars[ 'c2' ]
c3 = pars[ 'c3' ]
c4 = pars[ 'c4' ]
elif pars['ld']==None:
pars['ld'] = 'quad'
pars['gam1'] = 0.
pars['gam2'] = 0.
except:
pars['ld'] = 'quad'
pars['gam1'] = 0.
pars['gam2'] = 0.
# Calculate the mean anomaly:
MeanAnom = ( 2*np.pi/P )*( t - T0 )
# Calculate the normalised separation between the
# planetary and stellar discs:
if ecc != 0.:
omega_rad = pars['omega'] * np.pi/180.
NormSep = keporb.NormSep( MeanAnom, aRs, ecc, omega_rad, incl_rad )
else:
omega_rad = 3.*np.pi/2.
b = aRs*np.cos( incl_rad )
NormSep = np.sqrt( ( ( aRs*np.sin( MeanAnom ) )**2. ) \
+ ( ( b*np.cos( MeanAnom ) )**2. ) )
# If we want to model both primary transits and secondary
# eclipses, we need to compute the Z coordinate to determine
# when the planet is in front of the star (Z<0) and behind
# the star (Z>0):
if tr_type=='both':
F = np.ones( len( t ) )
zcoord = keporb.Zcoord( MeanAnom, aRs, ecc, omega_rad, incl_rad )
ixsf = ( zcoord < 0 )
if pars['ld']=='quad':
F[ixsf] = ma02.F_quad( NormSep[ixsf], RpRs, \
pars['gam1'], pars['gam2'] )
elif pars['ld']=='nonlin':
F[ixsf] = ma02.F_nonlin( NormSep[ixsf], RpRs, \
pars['c1'], pars['c2'], \
pars['c3'], pars['c4'] )
else:
pdb.set_trace()
ixsb = ( zcoord >= 0 )
temp = ma02.F_quad( NormSep[ixsb], RpRs, 0.0, 0.0 ) - 1.
F[ixsb] = 1 + temp*SecDepth/( temp.max() - temp.min() ) #/( RpRs**2. )
# If we're only interested in the primary transits then
# we must take stellar limb darkening into account
# while treating the planet as an non-luminous disc:
elif tr_type=='primary':
if pars['ld']=='quad':
F = ma02.F_quad( NormSep, RpRs, \
pars['gam1'], pars['gam2'] )
elif pars['ld']=='nonlin':
F = ma02.F_nonlin( NormSep, RpRs, \
pars['c1'], pars['c2'], \
pars['c3'], pars['c4'] )
else:
pdb.set_trace()
# If we're only interested in the secondary eclipses
# we treat the planet as a uniform disc with no limb
# darkening:
elif tr_type=='secondary':
temp = ma02.F_quad( NormSep, RpRs, 0.0, 0.0 ) - 1.
F = 1 + temp*SecDepth/( temp.max() - temp.min() ) #/( RpRs**2. )
# If requested, re-scale the lightcurve by a linear
# trend before returning the output:
if ( grad!=0. )+( foot!=1. ):
twid = t.max() - t.min()
tmid = t.min() + 0.5*twid
F = F * ( foot + grad*( t - tmid ) )
return F
def calc_T0( Ttr, P, ecc, omega, transit='primary' ):
"""
SUMMARY
Computes time of periapse passage given the mid-time of
either the primary transit or the secondary eclipse.
CALLING
T0 = transit.calc_T0( Ttr, P, ecc, omega, transit='primary' )
INPUTS
** Ttr - transit/eclipse mid-time.
** P - orbital period.
** ecc - orbital eccentricity.
** omega - argument of periapse in degrees.
KEYWORD INPUTS
** transit - 'primary' or 'secondary' depending on whether the
mid-time Ttr is for the primary transit or secondary eclipse;
default is 'primary'.
OUTPUT
** T0 - the time of periapse passage.
NOTES:
** Ttr and P must have the same units of time, and the output T0
will also have the same units.
** By default, the longitude of the ascending node (big Omega)
is implicitly taken to be 180 degrees in the calculations.
"""
# Convert omega from degrees to radians:
omega *= np.pi / 180.
# Make sure omega has been provided between 0-2pi:
while omega >= 2*np.pi:
omega -= 2*np.pi
while omega < 0:
omega += 2*np.pi
# Calculate the true anomaly corresponding to the
# midpoint of the transit/eclipse, and ensure that
# the value lies in the 0-2pi range:
if transit=='primary':
TrueAnom_tr = 3*np.pi/2. - omega
elif transit=='secondary':
TrueAnom_tr = np.pi/2. - omega
else:
pdb.set_trace()
while TrueAnom_tr >= 2*np.pi:
TrueAnom_tr -= 2*np.pi
while TrueAnom_tr < 0:
TrueAnom_tr += 2*np.pi
# Calculate the value of the eccentric anomaly at the time
# of transit/eclipse:
EccAnom_tr = 2 * np.arctan2( np.sqrt( 1. - ecc )*np.sin( TrueAnom_tr/2. ), \
np.sqrt( 1. + ecc )*np.cos( TrueAnom_tr/2. ) )
# Use the eccentric anomaly at the time of transit/eclipse
# to calculate the mean anomaly:
MeanAnom_tr = EccAnom_tr - ecc*np.sin( EccAnom_tr )
# Convert the mean anomaly to a time elapsed since the
# time of periastron passage:
delt = P * ( MeanAnom_tr/2./np.pi )
# Calculate the time of periastron passage:
T0 = Ttr - delt
return T0
def example():
"""
Simple routine that demonstrates various ways of
computing lightcurves, and plots the results.
"""
# Time values:
t = np.linspace( 0., 7., 10000 )
# Orbit properties:
ecc = 0.0
P = 2.1 # days
incl = 88.2 # degrees
omega = 90.0 # degrees
T_tr = 0.0 # time of transit
T0 = calc_T0( T_tr, P, ecc, omega, transit='primary' )
# Star-planet physical:
Rp = 1.0 # Jupiter radii
Mp = 1.0 # Jupiter masses
Rs = 1.0 # solar radii
Ms = 1.0 # solar masses
# Lightcurve properties:
SecDepth = 1e-3
foot = 1.0
grad = 0.0
# Calculate the semimajor axis using Kepler's
# third law:
a = np.power( ( ( ( ( P*24.*60.*60./( 2*np.pi ) )**2 ) * consts.G \
* ( Ms*consts.MSun + Mp*consts.MJup ) ) ) , (1./3.) )
RpRs = ( Rp*consts.RJup )/( Rs*consts.RSun )
aRs = a / ( Rs*consts.RSun )
# Nonlinear limb darkening coeffs:
c1 = -0.1
c2 = +1.4
c3 = -1.2
c4 = +0.5
# Quadratic limb darkening coeffs:
gam1 = 0.5
gam2 = 0.1
# Quadratic limb darkening + RsMsRpMp parameterisation:
pars_RsMsRpMp_q = { 'T0':T0, 'P':P, 'Ms':Ms, 'Mp':Mp, 'Rs':Rs, \
'Rp':Rp, 'SecDepth':SecDepth, 'incl':incl, \
'ecc':ecc, 'omega':omega, 'gam1':gam1, 'gam2':gam2, \
'ld':'quad', 'foot':foot, 'grad':grad }
F_RsMsRpMp_q = ma02_RsMsRpMp( t, **pars_RsMsRpMp_q )
# Nonlinear limb darkening + RsMsRpMp parameterisation:
pars_RsMsRpMp_nl = { 'T0':T0, 'P':P, 'Ms':Ms, 'Mp':Mp, 'Rs':Rs, \
'Rp':Rp, 'SecDepth':SecDepth, 'incl':incl, \
'ecc':ecc, 'omega':omega, 'c1':c1, 'c2':c2, \
'c3':c3, 'c4':c4, 'ld':'nonlin', \
'foot':foot, 'grad':grad }
F_RsMsRpMp_nl = ma02_RsMsRpMp( t, **pars_RsMsRpMp_nl )
# No limb darkening + RsMsRpMp parameterisation:
pars_RsMsRpMp_n = { 'T0':T0, 'P':P, 'Ms':Ms, 'Mp':Mp, 'Rs':Rs, \
'Rp':Rp, 'SecDepth':SecDepth, 'incl':incl, \
'ecc':ecc, 'omega':omega, 'ld':None, \
'foot':foot, 'grad':grad }
F_RsMsRpMp_n = ma02_RsMsRpMp( t, **pars_RsMsRpMp_n )
# Quadratic limb darkening + aRs parameterisation:
pars_aRs_q = { 'T0':T0, 'P':P, 'aRs':aRs, 'RpRs':RpRs, \
'SecDepth':SecDepth, 'incl':incl, 'ecc':ecc, \
'omega':omega, 'gam1':gam1, 'gam2':gam2,
'ld':'quad', 'foot':foot, 'grad':grad }
F_aRs_q = ma02_aRs( t, **pars_aRs_q )
# Nonlinear limb darkening + aRs parameterisation:
pars_aRs_nl = { 'T0':T0, 'P':P, 'aRs':aRs, 'RpRs':RpRs, \
'SecDepth':SecDepth, 'incl':incl, 'ecc':ecc, \
'omega':omega, 'c1':c1, 'c2':c2, 'c3':c3, 'c4':c4,
'ld':'nonlin', 'foot':foot, 'grad':grad }
F_aRs_nl = ma02_aRs( t, **pars_aRs_nl )
# No limb darkening + aRs parameterisation:
pars_aRs_n = { 'T0':T0, 'P':P, 'aRs':aRs, 'RpRs':RpRs, \
'SecDepth':SecDepth, 'incl':incl, 'ecc':ecc, \
'omega':omega, 'ld':None, \
'foot':foot, 'grad':grad }
F_aRs_n = ma02_aRs( t, **pars_aRs_n )
# Plot the results:
fig = plt.figure()
ax1 = fig.add_subplot( 211 )
ax1.plot( t, F_aRs_n, '--g', lw=1 )
ax1.plot( t, F_aRs_nl, '-m', lw=1 )
ax1.plot( t, F_aRs_q, '-b', lw=1 )
#ax1.set_ylim( [ 1-1.4*(RpRs**2.), 1+0.2*(RpRs**2.) ] )
ax1.set_ylim( [ 1. - 1.4*foot*(RpRs**2.), 1. + 0.2*foot*(RpRs**2.) ] )
ax2 = fig.add_subplot( 212, sharex=ax1 )
ax2.plot( t, F_RsMsRpMp_n, '--g', lw=1 )
ax2.plot( t, F_RsMsRpMp_nl, '-m', lw=1 )
ax2.plot( t, F_RsMsRpMp_q, '-b', lw=1 )
#ax2.set_ylim( [ 1-1.4*(RpRs**2.), 1+0.2*(RpRs**2.) ] )
ax2.set_ylim( [ 1. - 1.4*foot*(RpRs**2.), 1. + 0.2*foot*(RpRs**2.) ] )
ax2.set_xlabel( 'Time' )
# Discrepancies between output from routines
# using different parameterisations:
print( 'This should be zero --> {0:.10f}'\
.format( ( F_RsMsRpMp_q - F_aRs_q ).max() ) )
print( 'This should be zero --> {0:.10f}'\
.format( ( F_RsMsRpMp_nl - F_aRs_nl ).max() ) )
return None
|
gpl-2.0
|
JanNash/sms-tools
|
lectures/05-Sinusoidal-model/plots-code/spectral-peaks-interpolation.py
|
22
|
1234
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = int(.8*fs)
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9.5, 5.5))
plt.subplot (2,1,1)
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([300,2500,-70,max(mX)])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + spectral peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis,pX,'c', lw=1.5)
plt.axis([300,2500,min(pX),-1])
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + spectral peaks')
plt.tight_layout()
plt.savefig('spectral-peaks-interpolation.png')
plt.show()
|
agpl-3.0
|
robbymeals/scikit-learn
|
examples/cluster/plot_mini_batch_kmeans.py
|
265
|
4081
|
"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
bsd-3-clause
|
melgor/autograd
|
examples/sinusoid.py
|
3
|
1120
|
from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
import matplotlib.pyplot as plt
from autograd import grad
from six.moves import map
from six.moves import range
def fun(x):
return np.sin(x)
d_fun = grad(fun) # First derivative
dd_fun = grad(d_fun) # Second derivative
x = np.linspace(-10, 10, 100)
plt.plot(x, list(map(fun, x)), x, list(map(d_fun, x)), x, list(map(dd_fun, x)))
plt.xlim([-10, 10])
plt.ylim([-1.2, 1.2])
plt.axis('off')
plt.savefig("sinusoid.png")
plt.clf()
# Taylor approximation to sin function
def fun(x):
currterm = x
ans = currterm
for i in range(1000):
print(i, end=' ')
currterm = - currterm * x ** 2 / ((2 * i + 3) * (2 * i + 2))
ans = ans + currterm
if np.abs(currterm) < 0.2: break # (Very generous tolerance!)
return ans
d_fun = grad(fun)
dd_fun = grad(d_fun)
x = np.linspace(-10, 10, 100)
plt.plot(x, list(map(fun, x)), x, list(map(d_fun, x)), x, list(map(dd_fun, x)))
plt.xlim([-10, 10])
plt.ylim([-1.2, 1.2])
plt.axis('off')
plt.savefig("sinusoid_taylor.png")
plt.clf()
|
mit
|
kevin-intel/scikit-learn
|
examples/exercises/plot_cv_diabetes.py
|
23
|
2755
|
"""
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
X, y = datasets.load_diabetes(return_X_y=True)
X = X[:150]
y = y[:150]
lasso = Lasso(random_state=0, max_iter=10000)
alphas = np.logspace(-4, -0.5, 30)
tuned_parameters = [{'alpha': alphas}]
n_folds = 5
clf = GridSearchCV(lasso, tuned_parameters, cv=n_folds, refit=False)
clf.fit(X, y)
scores = clf.cv_results_['mean_test_score']
scores_std = clf.cv_results_['std_test_score']
plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)
plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')
# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)
plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])
# #############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, random_state=0, max_iter=10000)
k_fold = KFold(3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
|
bsd-3-clause
|
jm-begon/scikit-learn
|
examples/linear_model/plot_lasso_lars.py
|
363
|
1080
|
#!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
neuroidss/nupic.research
|
projects/capybara/sandbox/classification/plot_raw_sensortag_data.py
|
9
|
2428
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import os
import matplotlib.pyplot as plt
from settings.acc_data import (INPUT_FILES,
METRICS,
DATA_DIR)
numRecordsToPlot = 500
plt.figure(figsize=(20, 13))
for inputFile in INPUT_FILES:
filePath = os.path.join(DATA_DIR, inputFile)
with open(filePath, 'rU') as f:
reader = csv.reader(f)
headers = reader.next()
reader.next()
t = []
x = []
y = []
z = []
for i, values in enumerate(reader):
record = dict(zip(headers, values))
try:
for metric in METRICS:
float(record[metric])
x.append(float(record['x']))
y.append(float(record['y']))
z.append(float(record['z']))
t.append(i)
except ValueError:
print "Not possible to convert some values of %s to a float" % record
if i > numRecordsToPlot:
break
subplot_index = INPUT_FILES.index(inputFile)
ax = plt.subplot(4, 1, subplot_index + 1)
ax.plot(t, x, 'r', label='x')
ax.plot(t, y, 'b', label='y')
ax.plot(t, z, 'g', label='z')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.tight_layout()
plt.title(inputFile)
plt.xlim([0, numRecordsToPlot])
plt.ylim([-8, 8])
plt.xlabel('timestep')
plt.ylabel('accelerometer')
plt.grid()
outputFile = '%s.png' % inputFile[:-4]
plt.savefig(outputFile)
plt.show()
|
agpl-3.0
|
pllim/ginga
|
ginga/rv/plugins/Histogram.py
|
3
|
24097
|
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
``Histogram`` plots a histogram for a region drawn in the image, or for the
entire image.
**Plugin Type: Local**
``Histogram`` is a local plugin, which means it is associated with a channel.
An instance can be opened for each channel.
**Usage**
Click and drag to define a region within the image that will be used to
calculate the histogram. To take the histogram of the full image, click
the button in the UI labeled "Full Image".
.. note:: Depending on the size of the image, calculating the
full histogram may take time.
If a new image is selected for the channel, the histogram plot will be
recalculated based on the current parameters with the new data.
Unless disabled in the settings file for the histogram plugin, a line of
simple statistics for the box is calculated and shown below the plot.
**UI Controls**
Three radio buttons at the bottom of the UI are used to control the
effects of the click/drag action:
* select "Move" to drag the region to a different location
* select "Draw" to draw a new region
* select "Edit" to edit the region
To make a log plot of the histogram, check the "Log Histogram" checkbox.
To plot by the full range of values in the image instead of by the range
within the cut values, uncheck the "Plot By Cuts" checkbox.
The "NumBins" parameter determines how many bins are used in calculating
the histogram. Type a number in the box and press "Enter" to change the
default value.
**Cut Levels Convenience Controls**
Because a histogram is useful feedback for setting the cut levels,
controls are provided in the UI for setting the low and high cut levels
in the image, as well as for performing an auto cut levels, according to
the auto cut levels settings in the channel preferences.
You can set cut levels by clicking in the histogram plot:
* left click: set low cut
* middle click: reset (auto cut levels)
* right click: set high cut
In addition, you can dynamically adjust the gap between low and high cuts
by scrolling the wheel in the plot (i.e. the "width" of the histogram plot
curve). This has the effect of increasing or decreasing the contrast
within the image. The amount that is changed for each wheel click is set
by the plugin configuration file setting ``scroll_pct``. The default is 10%.
**User Configuration**
"""
import numpy as np
from ginga.gw import Widgets
from ginga import GingaPlugin
from ginga import AutoCuts
try:
from ginga.gw import Plot
from ginga.util import plots
have_mpl = True
except ImportError:
have_mpl = False
__all__ = ['Histogram']
class Histogram(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Histogram, self).__init__(fv, fitsimage)
self.layertag = 'histogram-canvas'
self.histtag = None
# If True, limits X axis to lo/hi cut levels
self.xlimbycuts = True
# percentage to adjust plotting X limits when xlimbycuts is True
self.lim_adj_pct = 0.03
self._split_sizes = [400, 500]
# get Histogram preferences
prefs = self.fv.get_preferences()
self.settings = prefs.create_category('plugin_Histogram')
self.settings.add_defaults(draw_then_move=True, num_bins=2048,
hist_color='aquamarine', show_stats=True,
maxdigits=7, scroll_pct=0.10)
self.settings.load(onError='silent')
# Set up histogram control parameters
self.histcolor = self.settings.get('hist_color', 'aquamarine')
self.numbins = self.settings.get('num_bins', 2048)
self.autocuts = AutoCuts.Histogram(self.logger)
# percentage to adjust cuts gap when scrolling in histogram
self.scroll_pct = self.settings.get('scroll_pct', 0.10)
# for formatting statistics line
self.show_stats = self.settings.get('show_stats', True)
maxdigits = self.settings.get('maxdigits', 7)
self.fmt_cell = '{:< %d.%dg}' % (maxdigits - 1, maxdigits // 2)
self.dc = self.fv.get_draw_classes()
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='cyan', linestyle='dash',
drawdims=True)
canvas.set_callback('draw-event', self.draw_cb)
canvas.set_callback('edit-event', self.edit_cb)
canvas.add_draw_mode('move', down=self.drag,
move=self.drag, up=self.update)
canvas.register_for_cursor_drawing(self.fitsimage)
canvas.set_surface(self.fitsimage)
canvas.set_draw_mode('draw')
self.canvas = canvas
fitssettings = fitsimage.get_settings()
for name in ['cuts']:
fitssettings.get_setting(name).add_callback(
'set', self.cutset_ext_cb, fitsimage)
self.gui_up = False
def build_gui(self, container):
if not have_mpl:
raise ImportError('Install matplotlib to use this plugin')
top = Widgets.VBox()
top.set_border_width(4)
# Make the cuts plot
box, sw, orientation = Widgets.get_oriented_box(container,
orientation=self.settings.get('orientation', None))
box.set_border_width(4)
box.set_spacing(2)
paned = Widgets.Splitter(orientation=orientation)
self.w.splitter = paned
self.plot = plots.Plot(logger=self.logger,
width=400, height=400)
ax = self.plot.add_axis()
ax.grid(True)
self.plot.add_callback('button-press', self.set_cut_by_click)
self.plot.add_callback('scroll', self.adjust_cuts_scroll)
w = Plot.PlotWidget(self.plot)
self.plot.connect_ui()
w.resize(400, 400)
paned.add_widget(Widgets.hadjust(w, orientation))
vbox = Widgets.VBox()
vbox.set_border_width(2)
# for statistics line
self.w.stats1 = Widgets.Label('')
vbox.add_widget(self.w.stats1)
captions = (('Cut Low:', 'label', 'Cut Low', 'entry'),
('Cut High:', 'label', 'Cut High', 'entry',
'Cut Levels', 'button'),
('Auto Levels', 'button'),
('Log Histogram', 'checkbutton',
'Plot By Cuts', 'checkbutton'),
('NumBins:', 'label', 'NumBins', 'entry'),
('Full Image', 'button'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.cut_levels.set_tooltip("Set cut levels manually")
b.auto_levels.set_tooltip("Set cut levels by algorithm")
b.cut_low.set_tooltip("Set low cut level (press Enter)")
b.cut_high.set_tooltip("Set high cut level (press Enter)")
b.log_histogram.set_tooltip("Use the log of the pixel values for the "
"histogram (empty bins map to 10^-1)")
b.plot_by_cuts.set_tooltip("Only show the part of the histogram "
"between the cuts")
b.numbins.set_tooltip("Number of bins for the histogram")
b.full_image.set_tooltip("Use the full image for calculating the "
"histogram")
b.numbins.set_text(str(self.numbins))
b.cut_low.add_callback('activated', lambda w: self.cut_levels())
b.cut_high.add_callback('activated', lambda w: self.cut_levels())
b.cut_levels.add_callback('activated', lambda w: self.cut_levels())
b.auto_levels.add_callback('activated', lambda w: self.auto_levels())
b.log_histogram.set_state(self.plot.logy)
b.log_histogram.add_callback('activated', self.log_histogram_cb)
b.plot_by_cuts.set_state(self.xlimbycuts)
b.plot_by_cuts.add_callback('activated', self.plot_by_cuts_cb)
b.numbins.add_callback('activated', lambda w: self.set_numbins_cb())
b.full_image.add_callback('activated', lambda w: self.full_image_cb())
fr = Widgets.Frame("Histogram")
vbox.add_widget(w)
fr.set_widget(vbox)
box.add_widget(fr, stretch=0)
paned.add_widget(sw)
paned.set_sizes(self._split_sizes)
mode = self.canvas.get_draw_mode()
hbox = Widgets.HBox()
btn1 = Widgets.RadioButton("Move")
btn1.set_state(mode == 'move')
btn1.add_callback('activated',
lambda w, val: self.set_mode_cb('move', val))
btn1.set_tooltip("Choose this to position box")
self.w.btn_move = btn1
hbox.add_widget(btn1)
btn2 = Widgets.RadioButton("Draw", group=btn1)
btn2.set_state(mode == 'draw')
btn2.add_callback('activated',
lambda w, val: self.set_mode_cb('draw', val))
btn2.set_tooltip("Choose this to draw a replacement box")
self.w.btn_draw = btn2
hbox.add_widget(btn2)
btn3 = Widgets.RadioButton("Edit", group=btn1)
btn3.set_state(mode == 'edit')
btn3.add_callback('activated',
lambda w, val: self.set_mode_cb('edit', val))
btn3.set_tooltip("Choose this to edit a box")
self.w.btn_edit = btn3
hbox.add_widget(btn3)
if self.histtag is None:
self.w.btn_move.set_enabled(False)
self.w.btn_edit.set_enabled(False)
hbox.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(paned, stretch=5)
top.add_widget(hbox, stretch=0)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(3)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def close(self):
self.fv.stop_local_plugin(self.chname, str(self))
return True
def start(self):
self.plot.set_titles(rtitle="Histogram")
# insert canvas, if not already
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.get_object_by_tag(self.layertag)
except KeyError:
            # add the histogram canvas layer
p_canvas.add(self.canvas, tag=self.layertag)
#self.canvas.delete_all_objects()
self.resume()
def pause(self):
self.canvas.ui_set_active(False)
def resume(self):
# turn off any mode user may be in
self.modes_off()
self.canvas.ui_set_active(True, viewer=self.fitsimage)
self.fv.show_status("Draw a rectangle with the right mouse button")
def stop(self):
self.gui_up = False
self._split_sizes = self.w.splitter.get_sizes()
# remove the rect from the canvas
## try:
## self.canvas.delete_object_by_tag(self.histtag)
## except Exception:
## pass
##self.histtag = None
# remove the canvas from the image
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.delete_object_by_tag(self.layertag)
except Exception:
pass
self.fv.show_status("")
def full_image_cb(self):
canvas = self.canvas
try:
canvas.delete_object_by_tag(self.histtag)
except Exception:
pass
image = self.fitsimage.get_vip()
width, height = image.get_size()
x1, y1, x2, y2 = 0, 0, width - 1, height - 1
tag = canvas.add(self.dc.Rectangle(x1, y1, x2, y2,
color='cyan',
linestyle='dash'))
self.draw_cb(canvas, tag)
def get_data(self, image, x1, y1, x2, y2, z=0):
tup = image.cutout_adjust(x1, y1, x2 + 1, y2 + 1, z=z)
return tup[0]
def histogram(self, image, x1, y1, x2, y2, z=None, pct=1.0, numbins=2048):
self.logger.warning("This call will be deprecated soon. "
"Use get_data() and histogram_data().")
data_np = self.get_data(image, x1, y1, x2, y2, z=z)
return self.histogram_data(data_np, pct=pct, numbins=numbins)
def histogram_data(self, data, pct=1.0, numbins=2048):
return self.autocuts.calc_histogram(data, pct=pct, numbins=numbins)
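    # Hedged migration sketch for callers of the deprecated ``histogram()``
    # above: fetch the cutout first, then bin it, mirroring what that wrapper
    # does internally (the coordinates here are placeholders):
    #
    #     data_np = self.get_data(image, x1, y1, x2, y2)
    #     res = self.histogram_data(data_np, pct=1.0, numbins=self.numbins)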
def redo(self):
if self.histtag is None:
return
obj = self.canvas.get_object_by_tag(self.histtag)
if obj.kind != 'compound':
return True
bbox = obj.objects[0]
# Do histogram on the points within the rect
image = self.fitsimage.get_vip()
self.plot.clear()
numbins = self.numbins
depth = image.get_depth()
if depth != 3:
data_np = self.get_data(image, int(bbox.x1), int(bbox.y1),
int(bbox.x2), int(bbox.y2))
res = self.histogram_data(data_np, pct=1.0, numbins=numbins)
# used with 'steps-post' drawstyle, this x and y assignment
# gives correct histogram-steps
x = res.bins
y = np.append(res.dist, res.dist[-1])
ymax = y.max()
if self.plot.logy:
y = np.choose(y > 0, (.1, y))
self.plot.plot(x, y, xtitle="Pixel value", ytitle="Number",
title="Pixel Value Distribution",
color='blue', alpha=1.0, drawstyle='steps-post')
else:
colors = ('red', 'green', 'blue')
ymax = 0
for z in range(depth):
data_np = self.get_data(image, int(bbox.x1), int(bbox.y1),
int(bbox.x2), int(bbox.y2), z=z)
res = self.histogram_data(data_np, pct=1.0, numbins=numbins)
# used with 'steps-post' drawstyle, this x and y assignment
# gives correct histogram-steps
x = res.bins
y = np.append(res.dist, res.dist[-1])
ymax = max(ymax, y.max())
if self.plot.logy:
y = np.choose(y > 0, (.1, y))
self.plot.plot(x, y, xtitle="Pixel value", ytitle="Number",
title="Pixel Value Distribution",
color=colors[z], alpha=0.33,
drawstyle='steps-post')
# show cut levels
loval, hival = self.fitsimage.get_cut_levels()
self.loline = self.plot.ax.axvline(loval, 0.0, 0.99,
linestyle='-', color='brown')
self.hiline = self.plot.ax.axvline(hival, 0.0, 0.99,
linestyle='-', color='green')
if self.xlimbycuts:
# user wants "plot by cuts"--adjust plot limits to show only area
# between locut and high cut "plus a little" so that lo and hi cut
# markers are shown
incr = np.fabs(self.lim_adj_pct * (hival - loval))
self.plot.ax.set_xlim(loval - incr, hival + incr)
# Make x axis labels a little more readable
## lbls = self.plot.ax.xaxis.get_ticklabels()
## for lbl in lbls:
## lbl.set(rotation=45, horizontalalignment='right')
self.w.cut_low.set_text(str(loval))
self.w.cut_high.set_text(str(hival))
self.plot.fig.canvas.draw()
if self.show_stats:
# calculate statistics on finite elements in box
i = np.isfinite(data_np)
if np.any(i):
maxval = np.max(data_np[i])
minval = np.min(data_np[i])
meanval = np.mean(data_np[i])
rmsval = np.sqrt(np.mean(np.square(data_np[i])))
fmt_stat = " Min: %s Max: %s Mean: %s Rms: %s" % (
self.fmt_cell, self.fmt_cell, self.fmt_cell, self.fmt_cell)
sum_text = fmt_stat.format(minval, maxval, meanval, rmsval)
else:
sum_text = "No finite data elements in cutout"
self.w.stats1.set_text(sum_text)
self.fv.show_status("Click or drag left mouse button to move region")
return True
def update(self, canvas, event, data_x, data_y, viewer):
obj = self.canvas.get_object_by_tag(self.histtag)
if obj.kind == 'compound':
bbox = obj.objects[0]
elif obj.kind == 'rectangle':
bbox = obj
else:
return True
# calculate center of bbox
wd = bbox.x2 - bbox.x1
dw = wd // 2
ht = bbox.y2 - bbox.y1
dh = ht // 2
x, y = bbox.x1 + dw, bbox.y1 + dh
# calculate offsets of move
dx = (data_x - x)
dy = (data_y - y)
# calculate new coords
x1, y1, x2, y2 = bbox.x1 + dx, bbox.y1 + dy, bbox.x2 + dx, bbox.y2 + dy
try:
canvas.delete_object_by_tag(self.histtag)
except Exception:
pass
tag = canvas.add(self.dc.Rectangle(
x1, y1, x2, y2, color='cyan', linestyle='dash'))
self.draw_cb(canvas, tag)
return True
def drag(self, canvas, event, data_x, data_y, viewer):
obj = self.canvas.get_object_by_tag(self.histtag)
if obj.kind == 'compound':
bbox = obj.objects[0]
elif obj.kind == 'rectangle':
bbox = obj
else:
return True
# calculate center of bbox
wd = bbox.x2 - bbox.x1
dw = wd // 2
ht = bbox.y2 - bbox.y1
dh = ht // 2
x, y = bbox.x1 + dw, bbox.y1 + dh
# calculate offsets of move
dx = (data_x - x)
dy = (data_y - y)
# calculate new coords
x1, y1, x2, y2 = bbox.x1 + dx, bbox.y1 + dy, bbox.x2 + dx, bbox.y2 + dy
if obj.kind == 'compound':
try:
canvas.delete_object_by_tag(self.histtag)
except Exception:
pass
self.histtag = canvas.add(self.dc.Rectangle(
x1, y1, x2, y2, color='cyan', linestyle='dash'))
else:
bbox.x1, bbox.y1, bbox.x2, bbox.y2 = x1, y1, x2, y2
canvas.redraw(whence=3)
return True
def draw_cb(self, canvas, tag):
obj = canvas.get_object_by_tag(tag)
if obj.kind != 'rectangle':
return True
canvas.delete_object_by_tag(tag)
if self.histtag:
try:
canvas.delete_object_by_tag(self.histtag)
except Exception:
pass
x1, y1, x2, y2 = obj.get_llur()
tag = canvas.add(self.dc.CompoundObject(
self.dc.Rectangle(x1, y1, x2, y2,
color=self.histcolor),
self.dc.Text(x1, y2, "Histogram",
color=self.histcolor)))
self.histtag = tag
self.w.btn_move.set_enabled(True)
self.w.btn_edit.set_enabled(True)
move_flag = self.settings.get('draw_then_move', True)
if move_flag:
self.set_mode('move')
return self.redo()
def edit_cb(self, canvas, obj):
if obj.kind != 'rectangle':
return True
# Get the compound object that sits on the canvas.
# Make sure edited rectangle was our histogram rectangle.
c_obj = self.canvas.get_object_by_tag(self.histtag)
if ((c_obj.kind != 'compound') or (len(c_obj.objects) < 2) or
(c_obj.objects[0] != obj)):
return False
# reposition other elements to match
x1, y1, x2, y2 = obj.get_llur()
text = c_obj.objects[1]
text.x, text.y = x1, y2 + 4
self.fitsimage.redraw(whence=3)
return self.redo()
def cut_levels(self):
reslvls = None
try:
loval = float(self.w.cut_low.get_text())
hival = float(self.w.cut_high.get_text())
reslvls = self.fitsimage.cut_levels(loval, hival)
except Exception as e:
errmsg = 'Error cutting levels: {0}'.format(str(e))
self.fv.show_status(errmsg)
self.logger.error(errmsg)
else:
if self.xlimbycuts:
self.redo()
return reslvls
def auto_levels(self):
self.fitsimage.auto_levels()
def set_cut_by_click(self, plot, event):
"""Set cut levels by a mouse click in the histogram plot:
left: set low cut
middle: reset (auto cuts)
right: set high cut
"""
data_x = event.xdata
lo, hi = self.fitsimage.get_cut_levels()
if event.button == 1:
lo = data_x
self.fitsimage.cut_levels(lo, hi)
elif event.button == 2:
self.fitsimage.auto_levels()
elif event.button == 3:
hi = data_x
self.fitsimage.cut_levels(lo, hi)
def adjust_cuts_scroll(self, plot, event):
"""Adjust the width of the histogram by scrolling.
"""
bm = self.fitsimage.get_bindings()
pct = -self.scroll_pct
if event.step > 0:
pct = -pct
bm.cut_pct(self.fitsimage, pct)
def cutset_ext_cb(self, setting, value, fitsimage):
if not self.gui_up:
return
t_ = fitsimage.get_settings()
loval, hival = t_['cuts']
try:
self.loline.remove()
self.hiline.remove()
except Exception:
pass
self.loline = self.plot.ax.axvline(loval, 0.0, 0.99,
linestyle='-', color='black')
self.hiline = self.plot.ax.axvline(hival, 0.0, 0.99,
linestyle='-', color='black')
self.w.cut_low.set_text(str(loval))
self.w.cut_high.set_text(str(hival))
#self.plot.fig.canvas.draw()
self.redo()
def set_numbins_cb(self):
self.numbins = int(self.w.numbins.get_text())
self.redo()
def log_histogram_cb(self, w, val):
self.plot.logy = val
if (self.histtag is not None) and self.gui_up:
# self.histtag is None means no data is loaded yet
self.redo()
def plot_by_cuts_cb(self, w, val):
self.xlimbycuts = val
if (self.histtag is not None) and self.gui_up:
# self.histtag is None means no data is loaded yet
self.redo()
def edit_select_box(self):
if self.histtag is not None:
obj = self.canvas.get_object_by_tag(self.histtag)
if obj.kind != 'compound':
return True
# drill down to reference shape
bbox = obj.objects[0]
self.canvas.edit_select(bbox)
else:
self.canvas.clear_selected()
self.canvas.update_canvas()
def set_mode_cb(self, mode, tf):
"""Called when one of the Move/Draw/Edit radio buttons is selected."""
if tf:
self.canvas.set_draw_mode(mode)
if mode == 'edit':
self.edit_select_box()
return True
def set_mode(self, mode):
self.canvas.set_draw_mode(mode)
self.w.btn_move.set_state(mode == 'move')
self.w.btn_draw.set_state(mode == 'draw')
self.w.btn_edit.set_state(mode == 'edit')
def __str__(self):
return 'histogram'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example # noqa
if __doc__ is not None:
__doc__ += generate_cfg_example('plugin_Histogram', package='ginga')
# END
|
bsd-3-clause
|
thientu/scikit-learn
|
examples/svm/plot_svm_regression.py
|
249
|
1451
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
|
bsd-3-clause
|
Guneet-Dhillon/mxnet
|
example/kaggle-ndsb1/submission_dsb.py
|
52
|
5048
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import pandas as pd
import os
import time as time
## Receives an array with probabilities for each class (columns) X images in test set (as listed in test.lst) and formats in Kaggle submission format, saves and compresses in submission_path
def gen_sub(predictions,test_lst_path="test.lst",submission_path="submission.csv"):
## append time to avoid overwriting previous submissions
## submission_path=time.strftime("%Y%m%d%H%M%S_")+submission_path
### Make submission
## check sampleSubmission.csv from kaggle website to view submission format
header = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# read first line to know the number of columns and column to use
img_lst = pd.read_csv(test_lst_path,sep="/",header=None, nrows=1)
columns = img_lst.columns.tolist() # get the columns
    cols_to_use = columns[len(columns)-1] # index of the last column (the file names)
cols_to_use= map(int, str(cols_to_use)) ## convert scalar to list
    img_lst= pd.read_csv(test_lst_path,sep="/",header=None, usecols=cols_to_use) ## reads lst, use / as sep to get last column with filenames
img_lst=img_lst.values.T.tolist()
df = pd.DataFrame(predictions,columns = header, index=img_lst)
df.index.name = 'image'
print("Saving csv to %s" % submission_path)
df.to_csv(submission_path)
print("Compress with gzip")
os.system("gzip -f %s" % submission_path)
print(" stored in %s.gz" % submission_path)
|
apache-2.0
|
ellisztamas/faps
|
tests/test_sires.py
|
1
|
1472
|
from faps.sibshipCluster import sibshipCluster
import numpy as np
import pandas as pd
import faps as fp
ndraws=1000
np.random.seed(867)
allele_freqs = np.random.uniform(0.3,0.5,50)
adults = fp.make_parents(100, allele_freqs, family_name='a')
def test_sires():
# Example with a single family
progeny = fp.make_sibships(adults, 0, [1,2,3], 5, 'x')
mothers = adults.subset(progeny.mothers)
patlik = fp.paternity_array(progeny, mothers, adults, mu = 0.0015, missing_parents=0.01)
sc = fp.sibship_clustering(patlik)
me = sc.sires()
assert isinstance(me, pd.DataFrame)
list(me['label'])
def test_summarise_sires():
# Example with multiple half-sib families
progeny = fp.make_offspring(parents = adults, dam_list=[7,7,7,7,7,1,8,8,0], sire_list=[2,4,4,4,4,6,3,0,7])
mothers = adults.subset(individuals=progeny.mothers)
patlik = fp.paternity_array(progeny, mothers, adults, mu = 0.0013 )
patlik = patlik.split(by=progeny.mothers)
sibships = fp.sibship_clustering(patlik)
me2 = fp.summarise_sires(sibships)
assert (me2['father'].isin(progeny.fathers)).all()
# Remove a father
patlik = fp.paternity_array(progeny, mothers, adults, mu = 0.0013, missing_parents=0.2, purge="base_4")
patlik = patlik.split(by=progeny.mothers)
sibships = fp.sibship_clustering(patlik)
me3 = fp.summarise_sires(sibships)
assert me3['father'].str.contains("base_7").any()
assert not "base_4" in me3['father']
|
mit
|
wazeerzulfikar/scikit-learn
|
sklearn/cluster/bicluster.py
|
11
|
20245
|
"""Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import norm
from scipy.sparse import dia_matrix, issparse
from scipy.sparse.linalg import eigsh, svds
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.extmath import (make_nonnegative, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
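# Descriptive note: in matrix form, _scale_normalize returns
# ``A_n = R^{-1/2} * X * C^{-1/2}``, where ``R`` and ``C`` are the diagonal
# matrices of row and column sums of the non-negative ``X`` (NaNs arising
# from empty rows or columns are mapped to zero above).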
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X, y=None):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
    * Kluger, Yuval, et al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
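# Hedged usage sketch (not part of scikit-learn): recover the biclusters of a
# synthetic block-structured matrix with SpectralCoclustering.  The data
# generator and the sizes below are arbitrary choices for illustration.
if __name__ == '__main__':
    from sklearn.datasets import make_biclusters
    from sklearn.metrics import consensus_score

    data, rows, columns = make_biclusters(shape=(300, 300), n_clusters=5,
                                          noise=5, shuffle=False,
                                          random_state=0)
    model = SpectralCoclustering(n_clusters=5, random_state=0)
    model.fit(data)
    # compare the recovered biclusters with the ones used to build the data
    score = consensus_score(model.biclusters_, (rows, columns))
    print("consensus score: {:.3f}".format(score))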
|
bsd-3-clause
|
qrqiuren/sms-tools
|
software/models_interface/sineModel_function.py
|
21
|
2749
|
# function to call the main analysis/synthesis functions in software/models/sineModel.py
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import sineModel as SM
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80, minSineDur=0.02,
maxnSines=150, freqDevOffset=10, freqDevSlope=0.001):
"""
Perform analysis/synthesis using the sinusoidal model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
maxnSines: maximum number of parallel sinusoids
freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
fs, x = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# analyze the sound with the sinusoidal model
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
# synthesize the output sound from the sinusoidal representation
y = SM.sineModelSynth(tfreq, tmag, tphase, Ns, H, fs)
# output sound file name
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sineModel.wav'
# write the synthesized sound obtained from the sinusoidal synthesis
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the sinusoidal frequencies
plt.subplot(3,1,2)
if (tfreq.shape[1] > 0):
numFrames = tfreq.shape[0]
frmTime = H*np.arange(numFrames)/float(fs)
tfreq[tfreq<=0] = np.nan
plt.plot(frmTime, tfreq)
plt.axis([0, x.size/float(fs), 0, maxplotfreq])
plt.title('frequencies of sinusoidal tracks')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
|
agpl-3.0
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/models/cognitive_mapping_and_planning/tfcode/cmp_utils.py
|
14
|
6936
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for setting up the CMP graph.
"""
import os, numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim import arg_scope
import logging
from src import utils
import src.file_utils as fu
from tfcode import tf_utils
resnet_v2 = tf_utils.resnet_v2
custom_residual_block = tf_utils.custom_residual_block
def value_iteration_network(
fr, num_iters, val_neurons, action_neurons, kernel_size, share_wts=False,
name='vin', wt_decay=0.0001, activation_fn=None, shape_aware=False):
"""
Constructs a Value Iteration Network, convolutions and max pooling across
channels.
Input:
fr: NxWxHxC
val_neurons: Number of channels for maintaining the value.
action_neurons: Computes action_neurons * val_neurons at each iteration to
max pool over.
Output:
value image: NxHxWx(val_neurons)
"""
init_var = np.sqrt(2.0/(kernel_size**2)/(val_neurons*action_neurons))
vals = []
with tf.variable_scope(name) as varscope:
if shape_aware == False:
fr_shape = tf.unstack(tf.shape(fr))
val_shape = tf.stack(fr_shape[:-1] + [val_neurons])
val = tf.zeros(val_shape, name='val_init')
else:
val = tf.expand_dims(tf.zeros_like(fr[:,:,:,0]), dim=-1) * \
tf.constant(0., dtype=tf.float32, shape=[1,1,1,val_neurons])
val_shape = tf.shape(val)
vals.append(val)
for i in range(num_iters):
if share_wts:
        # The first value iteration may be special, so it can have its own
        # parameters.
scope = 'conv'
if i == 0: scope = 'conv_0'
if i > 1: varscope.reuse_variables()
else:
scope = 'conv_{:d}'.format(i)
val = slim.conv2d(tf.concat([val, fr], 3, name='concat_{:d}'.format(i)),
num_outputs=action_neurons*val_neurons,
kernel_size=kernel_size, stride=1, activation_fn=activation_fn,
scope=scope, normalizer_fn=None,
weights_regularizer=slim.l2_regularizer(wt_decay),
weights_initializer=tf.random_normal_initializer(stddev=init_var),
biases_initializer=tf.zeros_initializer())
val = tf.reshape(val, [-1, action_neurons*val_neurons, 1, 1],
name='re_{:d}'.format(i))
val = slim.max_pool2d(val, kernel_size=[action_neurons,1],
stride=[action_neurons,1], padding='VALID',
scope='val_{:d}'.format(i))
val = tf.reshape(val, val_shape, name='unre_{:d}'.format(i))
vals.append(val)
return val, vals
def rotate_preds(loc_on_map, relative_theta, map_size, preds,
output_valid_mask):
with tf.name_scope('rotate'):
flow_op = tf_utils.get_flow(loc_on_map, relative_theta, map_size=map_size)
if type(preds) != list:
rotated_preds, valid_mask_warps = tf_utils.dense_resample(preds, flow_op,
output_valid_mask)
else:
rotated_preds = [] ;valid_mask_warps = []
for pred in preds:
rotated_pred, valid_mask_warp = tf_utils.dense_resample(pred, flow_op,
output_valid_mask)
rotated_preds.append(rotated_pred)
valid_mask_warps.append(valid_mask_warp)
return rotated_preds, valid_mask_warps
def get_visual_frustum(map_size, shape_like, expand_dims=[0,0]):
with tf.name_scope('visual_frustum'):
l = np.tril(np.ones(map_size)) ;l = l + l[:,::-1]
l = (l == 2).astype(np.float32)
for e in expand_dims:
l = np.expand_dims(l, axis=e)
confs_probs = tf.constant(l, dtype=tf.float32)
confs_probs = tf.ones_like(shape_like, dtype=tf.float32) * confs_probs
return confs_probs
def deconv(x, is_training, wt_decay, neurons, strides, layers_per_block,
kernel_size, conv_fn, name, offset=0):
"""Generates a up sampling network with residual connections.
"""
batch_norm_param = {'center': True, 'scale': True,
'activation_fn': tf.nn.relu,
'is_training': is_training}
outs = []
for i, (neuron, stride) in enumerate(zip(neurons, strides)):
for s in range(layers_per_block):
scope = '{:s}_{:d}_{:d}'.format(name, i+1+offset,s+1)
x = custom_residual_block(x, neuron, kernel_size, stride, scope,
is_training, wt_decay, use_residual=True,
residual_stride_conv=True, conv_fn=conv_fn,
batch_norm_param=batch_norm_param)
stride = 1
outs.append((x,True))
return x, outs
def fr_v2(x, output_neurons, inside_neurons, is_training, name='fr',
wt_decay=0.0001, stride=1, updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Performs fusion of information between the map and the reward map.
Inputs
x: NxHxWxC1
Outputs
fr map: NxHxWx(output_neurons)
"""
if type(stride) != list:
stride = [stride]
with slim.arg_scope(resnet_v2.resnet_utils.resnet_arg_scope(
is_training=is_training, weight_decay=wt_decay)):
with slim.arg_scope([slim.batch_norm], updates_collections=updates_collections) as arg_sc:
# Change the updates_collections for the conv normalizer_params to None
for i in range(len(arg_sc.keys())):
if 'convolution' in arg_sc.keys()[i]:
arg_sc.values()[i]['normalizer_params']['updates_collections'] = updates_collections
with slim.arg_scope(arg_sc):
bottleneck = resnet_v2.bottleneck
blocks = []
for i, s in enumerate(stride):
b = resnet_v2.resnet_utils.Block(
'block{:d}'.format(i + 1), bottleneck, [{
'depth': output_neurons,
'depth_bottleneck': inside_neurons,
'stride': stride[i]
}])
blocks.append(b)
x, outs = resnet_v2.resnet_v2(x, blocks, num_classes=None, global_pool=False,
output_stride=None, include_root_block=False,
reuse=False, scope=name)
return x, outs
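if __name__ == '__main__':
  # Hedged smoke-test sketch (not part of the original module): build the
  # value iteration network on a dummy feature map and check the output
  # shape.  All sizes here are arbitrary assumptions.
  fr = tf.placeholder(tf.float32, shape=[1, 32, 32, 8], name='fr')
  val, vals = value_iteration_network(fr, num_iters=4, val_neurons=2,
                                      action_neurons=4, kernel_size=3)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(val, feed_dict={fr: np.zeros((1, 32, 32, 8), np.float32)})
    print(out.shape)  # expected (1, 32, 32, val_neurons) under these assumptions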
|
bsd-2-clause
|
ephes/scikit-learn
|
doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py
|
254
|
2253
|
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
bsd-3-clause
|
gtoonstra/airflow
|
tests/contrib/hooks/test_bigquery_hook.py
|
4
|
17580
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import warnings
from google.auth.exceptions import GoogleAuthError
import mock
from airflow.contrib.hooks import bigquery_hook as hook
from airflow.contrib.hooks.bigquery_hook import _cleanse_time_partitioning
bq_available = True
try:
hook.BigQueryHook().get_service()
except GoogleAuthError:
bq_available = False
class TestBigQueryDataframeResults(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_output_is_dataframe_with_valid_query(self):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_invalid_query(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('Reason: ', str(context.exception), "")
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_succeeds_with_explicit_legacy_query(self):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_succeeds_with_explicit_std_query(self):
df = self.instance.get_pandas_df(
'select * except(b) from (select 1 a, 2 b)', dialect='standard')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_incompatible_syntax(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df(
'select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('Reason: ', str(context.exception), "")
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('dataset.table', None)
self.assertIn('INTERNAL: No default project is specified',
str(context.exception), "")
def test_split_dataset_table(self):
project, dataset, table = hook._split_tablename('dataset.table',
'project')
self.assertEqual("project", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative:dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_sql_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative.dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_colon_in_project(self):
project, dataset, table = hook._split_tablename('alt1:alt.dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_valid_double_column(self):
project, dataset, table = hook._split_tablename('alt1:alt:dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_invalid_syntax_triple_colon(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt3:dataset.table',
'project')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_triple_dot(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_column_double_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt.dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_colon_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt:dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_dot_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project', 'var_x')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
class TestBigQueryHookSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test", "test_schema.json", ["test_data.json"], source_format="json"
)
# since we passed 'json' in, and it's not valid, make sure it's present in the
# error string.
self.assertIn("JSON", str(context.exception))
class TestBigQueryExternalTableSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").create_external_table(
external_project_dataset_table='test.test',
schema_fields='test_schema.json',
source_uris=['test_data.json'],
source_format='json'
)
        # since we passed 'json' in, and it's not valid, make sure it's present in the
# error string.
self.assertIn("JSON", str(context.exception))
# Helpers to test_cancel_queries that have mock_poll_job_complete returning false,
# unless mock_job_cancel was called with the same job_id
mock_canceled_jobs = []
def mock_poll_job_complete(job_id):
return job_id in mock_canceled_jobs
def mock_job_cancel(projectId, jobId):
mock_canceled_jobs.append(jobId)
return mock.Mock()
class TestBigQueryBaseCursor(unittest.TestCase):
def test_invalid_schema_update_options(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"]
)
self.assertIn("THIS IS NOT VALID", str(context.exception))
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_bql_deprecation_warning(self, mock_rwc):
with warnings.catch_warnings(record=True) as w:
hook.BigQueryBaseCursor("test", "test").run_query(
bql='select * from test_table'
)
self.assertIn(
'Deprecated parameter `bql`',
w[0].message.args[0])
def test_nobql_nosql_param_error(self):
with self.assertRaises(TypeError) as context:
hook.BigQueryBaseCursor("test", "test").run_query(
sql=None,
bql=None
)
self.assertIn(
'missing 1 required positional',
str(context.exception))
def test_invalid_schema_update_and_write_disposition(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY'
)
self.assertIn("schema_update_options is only", str(context.exception))
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
def test_cancel_queries(self, mocked_time, mocked_logging):
project_id = 12345
running_job_id = 3
mock_jobs = mock.Mock()
mock_jobs.cancel = mock.Mock(side_effect=mock_job_cancel)
mock_service = mock.Mock()
mock_service.jobs = mock.Mock(return_value=mock_jobs)
bq_hook = hook.BigQueryBaseCursor(mock_service, project_id)
bq_hook.running_job_id = running_job_id
bq_hook.poll_job_complete = mock.Mock(side_effect=mock_poll_job_complete)
bq_hook.cancel_query()
mock_jobs.cancel.assert_called_with(projectId=project_id, jobId=running_job_id)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_sql_dialect_default(self, run_with_config):
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
cursor.run_query('query')
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], True)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_sql_dialect_override(self, run_with_config):
for bool_val in [True, False]:
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
cursor.run_query('query', use_legacy_sql=bool_val)
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], bool_val)
class TestLabelsInRunJob(unittest.TestCase):
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_with_arg(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['labels'], {'label1': 'test1', 'label2': 'test2'}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
labels={'label1': 'test1', 'label2': 'test2'}
)
mocked_rwc.assert_called_once()
class TestTimePartitioningInRunJob(unittest.TestCase):
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_load_default(self, mocked_rwc, mocked_time, mocked_logging):
project_id = 12345
def run_with_config(config):
self.assertIsNone(config['load'].get('timePartitioning'))
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
mocked_rwc.assert_called_once()
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_load_with_arg(self, mocked_rwc, mocked_time, mocked_logging):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['load']['timePartitioning'],
{
'field': 'test_field',
'type': 'DAY',
'expirationMs': 1000
}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
mocked_rwc.assert_called_once()
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_default(self, mocked_rwc, mocked_time, mocked_logging):
project_id = 12345
def run_with_config(config):
self.assertIsNone(config['query'].get('timePartitioning'))
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(sql='select 1')
mocked_rwc.assert_called_once()
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_with_arg(self, mocked_rwc, mocked_time, mocked_logging):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['query']['timePartitioning'],
{
'field': 'test_field',
'type': 'DAY',
'expirationMs': 1000
}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
time_partitioning={'type': 'DAY',
'field': 'test_field', 'expirationMs': 1000}
)
mocked_rwc.assert_called_once()
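    # The tests below exercise _cleanse_time_partitioning directly: a
    # '$YYYYMMDD' suffix on the destination table implies day-based
    # partitioning, and combining it with an explicit partition field
    # raises a ValueError.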
def test_dollar_makes_partition(self):
tp_out = _cleanse_time_partitioning('test.teast$20170101', {})
expect = {
'type': 'DAY'
}
self.assertEqual(tp_out, expect)
def test_extra_time_partitioning_options(self):
tp_out = _cleanse_time_partitioning(
'test.teast',
{'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
expect = {
'type': 'DAY',
'field': 'test_field',
'expirationMs': 1000
}
self.assertEqual(tp_out, expect)
def test_cant_add_dollar_and_field_name(self):
with self.assertRaises(ValueError):
_cleanse_time_partitioning(
'test.teast$20170101',
{'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
class TestBigQueryHookLegacySql(unittest.TestCase):
"""Ensure `use_legacy_sql` param in `BigQueryHook` propagates properly."""
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_hook_uses_legacy_sql_by_default(self, run_with_config):
with mock.patch.object(hook.BigQueryHook, 'get_service'):
bq_hook = hook.BigQueryHook()
bq_hook.get_first('query')
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], True)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_legacy_sql_override_propagates_properly(self, run_with_config):
with mock.patch.object(hook.BigQueryHook, 'get_service'):
bq_hook = hook.BigQueryHook(use_legacy_sql=False)
bq_hook.get_first('query')
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], False)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
RachitKansal/scikit-learn
|
sklearn/utils/arpack.py
|
265
|
64837
|
"""
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
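#
# Usage sketch
# ------------
# An illustrative example (assuming a small dense symmetric matrix): use
# eigsh() for real symmetric problems and eigs() for general or complex
# Hermitian ones, e.g.
#
#   A = np.diag(np.arange(1., 6.))          # symmetric 5x5 test matrix
#   vals, vecs = eigsh(A, k=2, which='LM')  # two largest-magnitude eigenvalues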
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
# Map numpy dtype character codes to the single-letter ARPACK routine prefixes
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
# Number of significant digits used when rounding eigenvalues before sorting
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
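        #
        # For example, the shift-invert ('normal') path in _eigsh() below uses
        # mode=3 with matvec=None and
        # Minv_matvec=get_OPinv_matvec(A, M, sigma, symmetric=True).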
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got less or equal as many eigenvalues we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
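    # Wrap m in a LinearOperator and, if it lacks a dtype, infer one by
    # applying the operator to a zero vector.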
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
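    # Return a callable computing M^-1 * x: dense matrices use an LU
    # factorization, sparse matrices a sparse LU (splu), and general linear
    # operators an iterative solver (gmres by default).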
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
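    # Return a callable computing [A - sigma * M]^-1 * x for shift-invert mode,
    # choosing a dense LU, sparse LU, or iterative solver depending on the
    # types of A and M (M=None is treated as the identity).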
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
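# Hedged usage sketch (not part of the original backport): the naive
# A.H * A eigensolver approach described in the _svds docstring can be
# checked against a dense SVD. The helper below is illustrative only; the
# function name, sizes and tolerances are assumptions, not scipy/sklearn API.
def _example_svds_check(n=20, m=10, k=3, seed=0):
    rng = np.random.RandomState(seed)
    A = rng.rand(n, m)
    u, s, vh = svds(A, k=k)                           # k largest singular triplets
    dense_s = np.linalg.svd(A, compute_uv=False)[:k]  # reference values
    # compare both sets sorted in decreasing order
    return np.allclose(np.sort(s)[::-1], np.sort(dense_s)[::-1])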
|
bsd-3-clause
|
xguse/ggplot
|
ggplot/stats/stat_bin2d.py
|
4
|
2678
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
import pandas as pd
import numpy as np
from ggplot.utils import make_iterable_ntimes
from .stat import stat
_MSG_STATUS = """stat_bin2d is still under construction.
The resulting plot lacks color to indicate the counts/density in each bin,
and if grouping/faceting is used you get more bins than specified and
they vary in size between the groups.
see: https://github.com/yhat/ggplot/pull/266#issuecomment-41355513
https://github.com/yhat/ggplot/issues/283
"""
class stat_bin2d(stat):
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'rect', 'position': 'identity',
'bins': 30, 'drop': True, 'weight': 1,
'right': False}
CREATES = {'xmin', 'xmax', 'ymin', 'ymax', 'fill'}
def _calculate(self, data):
self._print_warning(_MSG_STATUS)
x = data.pop('x')
y = data.pop('y')
bins = self.params['bins']
drop = self.params['drop']
right = self.params['right']
weight = make_iterable_ntimes(self.params['weight'], len(x))
# create the cutting parameters
x_assignments, xbreaks = pd.cut(x, bins=bins, labels=False,
right=right, retbins=True)
y_assignments, ybreaks = pd.cut(y, bins=bins, labels=False,
right=right, retbins=True)
# create rectangles
# xmin, xmax, ymin, ymax, fill=count
df = pd.DataFrame({'xbin': x_assignments,
'ybin': y_assignments,
'weights': weight})
table = pd.pivot_table(df, values='weights',
rows=['xbin', 'ybin'], aggfunc=np.sum)
rects = np.array([[xbreaks[i], xbreaks[i+1],
ybreaks[j], ybreaks[j+1],
table[(i, j)]]
for (i, j) in table.keys()])
new_data = pd.DataFrame(rects, columns=['xmin', 'xmax',
'ymin', 'ymax',
'fill'])
# !!! assign colors???
# TODO: Remove this when visual mapping is applied after
# computing the stats
new_data['fill'] = ['#333333'] * len(new_data)
# Copy the other aesthetics into the new dataframe
# Note: There probably shouldn't be any for this stat
n = len(new_data)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
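# Hedged sketch (not part of ggplot): the same 2-D binning idea with plain
# pandas, using groupby instead of the old pivot_table 'rows' keyword used
# above. Function name and defaults are illustrative assumptions only.
def _bin2d_sketch(x, y, bins=10):
    # integer bin codes plus bin edges for each axis
    xb, xbreaks = pd.cut(x, bins=bins, labels=False, retbins=True)
    yb, ybreaks = pd.cut(y, bins=bins, labels=False, retbins=True)
    counts = pd.DataFrame({'xbin': xb, 'ybin': yb}).groupby(['xbin', 'ybin']).size()
    # one rectangle (xmin, xmax, ymin, ymax, count) per occupied bin
    rects = [(xbreaks[i], xbreaks[i + 1], ybreaks[j], ybreaks[j + 1], int(c))
             for (i, j), c in counts.items()]
    return pd.DataFrame(rects, columns=['xmin', 'xmax', 'ymin', 'ymax', 'fill'])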
|
bsd-2-clause
|
BrainIntensive/OnlineBrainIntensive
|
resources/HCP/mne-hcp/tutorials/plot_temporal_searchlight_decoding.py
|
3
|
3094
|
# -*- coding: utf-8 -*-
"""
.. _tut_searchlight_decoding:
=======================================================
Run temporal searchlight decoding on event related data
=======================================================
In this tutorial we show how to run temporal window decoding
on event-related data. We'll try to decode tools vs. faces in the
working memory data.
"""
# Author: Denis A. Engemann
# License: BSD 3 clause
import os.path as op
import numpy as np
import mne
import hcp
from hcp import preprocessing as preproc
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import StratifiedKFold
from mne.decoding import GeneralizationAcrossTime
mne.set_log_level('WARNING')
# we assume our data is inside its designated folder under $HOME
storage_dir = op.expanduser('~')
hcp_params = dict(
hcp_path=op.join(storage_dir, 'mne-hcp-data', 'HCP'),
subject='105923',
data_type='task_working_memory')
# these values are looked up from the HCP manual
tmin, tmax = -1.5, 2.5
decim = 3
##############################################################################
# We know the mapping of events from studying either the manual or the
# trial info.
event_id = dict(face=1, tool=2)
##############################################################################
# we first collect epochs across runs and essentially adopt the code
# shown in :ref:`tut_reproduce_erf`.
epochs = list()
for run_index in [0, 1]:
hcp_params['run_index'] = run_index
trial_info = hcp.read_trial_info(**hcp_params)
events = np.c_[
trial_info['stim']['codes'][:, 6] - 1, # time sample
np.zeros(len(trial_info['stim']['codes'])),
trial_info['stim']['codes'][:, 3] # event codes
].astype(int)
# for some reason in the HCP data the time events may not always be unique
unique_subset = np.nonzero(np.r_[1, np.diff(events[:, 0])])[0]
events = events[unique_subset] # use diff to find first unique events
    subset = np.in1d(events[:, 2], list(event_id.values()))
epochs_hcp = hcp.read_epochs(**hcp_params).decimate(decim)
epochs_hcp = epochs_hcp[unique_subset][subset]
epochs_hcp.events[:, 2] = events[subset, 2]
epochs_hcp.event_id = event_id
epochs_hcp.crop(-0.1, 0.5)
epochs.append(preproc.interpolate_missing(epochs_hcp, **hcp_params))
epochs = mne.concatenate_epochs(epochs)
del epochs_hcp
##############################################################################
# Now we can proceed as shown in the MNE-Python decoding tutorials
y = LabelBinarizer().fit_transform(epochs.events[:, 2]).ravel()
cv = StratifiedKFold(y=y) # do a stratified cross-validation
gat = GeneralizationAcrossTime(predict_mode='cross-validation', n_jobs=1,
cv=cv, scorer=roc_auc_score)
# fit and score
gat.fit(epochs, y=y)
gat.score(epochs)
##############################################################################
# Plotting the temporal generalization matrix and the evolution of discriminability.
gat.plot()
gat.plot_diagonal()
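##############################################################################
# Hedged note (not from the original tutorial): in more recent MNE-Python
# releases GeneralizationAcrossTime was replaced by GeneralizingEstimator.
# A rough equivalent of the fit/score step above, assuming MNE >= 0.15,
# would look like this:
#
#     from sklearn.pipeline import make_pipeline
#     from sklearn.preprocessing import StandardScaler
#     from sklearn.linear_model import LogisticRegression
#     from mne.decoding import GeneralizingEstimator, cross_val_multiscore
#     gen = GeneralizingEstimator(make_pipeline(StandardScaler(),
#                                               LogisticRegression()),
#                                 scoring='roc_auc', n_jobs=1)
#     scores = cross_val_multiscore(gen, epochs.get_data(), y, cv=5)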
|
mit
|
amchagas/python-neo
|
doc/source/images/generate_diagram.py
|
7
|
7892
|
# -*- coding: utf-8 -*-
"""
This generates diagrams in .png and .svg from neo.core
Author: sgarcia
"""
from datetime import datetime
import numpy as np
import quantities as pq
from matplotlib import pyplot
from matplotlib.patches import Rectangle, ArrowStyle, FancyArrowPatch
from matplotlib.font_manager import FontProperties
from neo.test.generate_datasets import fake_neo
line_heigth = .22
fontsize = 10.5
left_text_shift = .1
dpi = 100
def get_rect_height(name, obj):
'''
calculate rectangle height
'''
nlines = 1.5
nlines += len(getattr(obj, '_all_attrs', []))
nlines += len(getattr(obj, '_single_child_objects', []))
nlines += len(getattr(obj, '_multi_child_objects', []))
nlines += len(getattr(obj, '_multi_parent_objects', []))
return nlines*line_heigth
def annotate(ax, coord1, coord2, connectionstyle, color, alpha):
arrowprops = dict(arrowstyle='fancy',
#~ patchB=p,
shrinkA=.3, shrinkB=.3,
fc=color, ec=color,
connectionstyle=connectionstyle,
alpha=alpha)
bbox = dict(boxstyle="square", fc="w")
a = ax.annotate('', coord1, coord2,
#xycoords="figure fraction",
#textcoords="figure fraction",
ha="right", va="center",
size=fontsize,
arrowprops=arrowprops,
bbox=bbox)
a.set_zorder(-4)
def calc_coordinates(pos, height):
x = pos[0]
y = pos[1] + height - line_heigth*.5
return pos[0], y
def generate_diagram(filename, rect_pos, rect_width, figsize):
rw = rect_width
fig = pyplot.figure(figsize=figsize)
ax = fig.add_axes([0, 0, 1, 1])
all_h = {}
objs = {}
for name in rect_pos:
objs[name] = fake_neo(name)
all_h[name] = get_rect_height(name, objs[name])
# draw connections
color = ['c', 'm', 'y']
alpha = [1., 1., 0.3]
for name, pos in rect_pos.items():
obj = objs[name]
relationships = [getattr(obj, '_single_child_objects', []),
getattr(obj, '_multi_child_objects', []),
getattr(obj, '_child_properties', [])]
for r in range(3):
for ch_name in relationships[r]:
x1, y1 = calc_coordinates(rect_pos[ch_name], all_h[ch_name])
x2, y2 = calc_coordinates(pos, all_h[name])
if r in [0, 2]:
x2 += rect_width
connectionstyle = "arc3,rad=-0.2"
elif y2 >= y1:
connectionstyle = "arc3,rad=0.7"
else:
connectionstyle = "arc3,rad=-0.7"
annotate(ax=ax, coord1=(x1, y1), coord2=(x2, y2),
connectionstyle=connectionstyle,
color=color[r], alpha=alpha[r])
# draw boxes
for name, pos in rect_pos.items():
htotal = all_h[name]
obj = objs[name]
allrelationship = (getattr(obj, '_child_containers', []) +
getattr(obj, '_multi_parent_containers', []))
rect = Rectangle(pos, rect_width, htotal,
facecolor='w', edgecolor='k', linewidth=2.)
ax.add_patch(rect)
# title green
pos2 = pos[0], pos[1]+htotal - line_heigth*1.5
rect = Rectangle(pos2, rect_width, line_heigth*1.5,
facecolor='g', edgecolor='k', alpha=.5, linewidth=2.)
ax.add_patch(rect)
# single relationship
relationship = getattr(obj, '_single_child_objects', [])
pos2 = pos[1] + htotal - line_heigth*(1.5+len(relationship))
rect_height = len(relationship)*line_heigth
rect = Rectangle((pos[0], pos2), rect_width, rect_height,
facecolor='c', edgecolor='k', alpha=.5)
ax.add_patch(rect)
# multi relationship
relationship = (getattr(obj, '_multi_child_objects', []) +
getattr(obj, '_multi_parent_containers', []))
pos2 = (pos[1]+htotal - line_heigth*(1.5+len(relationship)) -
rect_height)
rect_height = len(relationship)*line_heigth
rect = Rectangle((pos[0], pos2), rect_width, rect_height,
facecolor='m', edgecolor='k', alpha=.5)
ax.add_patch(rect)
# necessary attr
pos2 = (pos[1]+htotal -
line_heigth*(1.5+len(allrelationship) +
len(obj._necessary_attrs)))
rect = Rectangle((pos[0], pos2), rect_width,
line_heigth*len(obj._necessary_attrs),
facecolor='r', edgecolor='k', alpha=.5)
ax.add_patch(rect)
# name
if hasattr(obj, '_quantity_attr'):
post = '* '
else:
post = ''
ax.text(pos[0]+rect_width/2., pos[1]+htotal - line_heigth*1.5/2.,
name+post,
horizontalalignment='center', verticalalignment='center',
fontsize=fontsize+2,
fontproperties=FontProperties(weight='bold'),
)
#relationship
for i, relat in enumerate(allrelationship):
ax.text(pos[0]+left_text_shift, pos[1]+htotal - line_heigth*(i+2),
relat+': list',
horizontalalignment='left', verticalalignment='center',
fontsize=fontsize,
)
# attributes
for i, attr in enumerate(obj._all_attrs):
attrname, attrtype = attr[0], attr[1]
t1 = attrname
if (hasattr(obj, '_quantity_attr') and
obj._quantity_attr == attrname):
t1 = attrname+'(object itself)'
else:
t1 = attrname
if attrtype == pq.Quantity:
if attr[2] == 0:
t2 = 'Quantity scalar'
else:
t2 = 'Quantity %dD' % attr[2]
elif attrtype == np.ndarray:
t2 = "np.ndarray %dD dt='%s'" % (attr[2], attr[3].kind)
elif attrtype == datetime:
t2 = 'datetime'
else:
t2 = attrtype.__name__
t = t1+' : '+t2
ax.text(pos[0]+left_text_shift,
pos[1]+htotal - line_heigth*(i+len(allrelationship)+2),
t,
horizontalalignment='left', verticalalignment='center',
fontsize=fontsize,
)
xlim, ylim = figsize
ax.set_xlim(0, xlim)
ax.set_ylim(0, ylim)
ax.set_xticks([])
ax.set_yticks([])
fig.savefig(filename, dpi=dpi)
def generate_diagram_simple():
figsize = (18, 12)
rw = rect_width = 3.
bf = blank_fact = 1.2
rect_pos = {'Block': (.5+rw*bf*0, 4),
'Segment': (.5+rw*bf*1, .5),
'Event': (.5+rw*bf*4, 6),
'EventArray': (.5+rw*bf*4, 4),
'Epoch': (.5+rw*bf*4, 2),
'EpochArray': (.5+rw*bf*4, .2),
'RecordingChannelGroup': (.5+rw*bf*.8, 8.5),
'RecordingChannel': (.5+rw*bf*1.2, 5.5),
'Unit': (.5+rw*bf*2., 9.5),
'SpikeTrain': (.5+rw*bf*3, 9.5),
'Spike': (.5+rw*bf*3, 7.5),
'IrregularlySampledSignal': (.5+rw*bf*3, 4.9),
'AnalogSignal': (.5+rw*bf*3, 2.7),
'AnalogSignalArray': (.5+rw*bf*3, .5),
}
generate_diagram('simple_generated_diagram.svg',
rect_pos, rect_width, figsize)
generate_diagram('simple_generated_diagram.png',
rect_pos, rect_width, figsize)
if __name__ == '__main__':
generate_diagram_simple()
pyplot.show()
|
bsd-3-clause
|
kevinpetersavage/BOUT-dev
|
examples/elm-pb/Python/plotmode2.py
|
3
|
1596
|
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
from numpy import *;
#from scipy.io import readsav;
import matplotlib.pyplot as plt;
from boutdata import collect
# Dynamic matplotlib settings
from matplotlib import rcParams;
rcParams['font.size'] = 20;
rcParams['legend.fontsize'] = 'small';
rcParams['legend.labelspacing'] = 0.1;
rcParams['lines.linewidth'] = 2;
rcParams['savefig.bbox'] = 'tight';
# Create image directory if not exists
import os;
if not os.path.exists('image'):
os.makedirs('image');
path='./data/'
data=collect('P',path=path)
#fphi = transpose(readsav('fphi.idl.dat')['fphi'])[:,:,:,];
fphi = fft.fft(data, axis=3)
plt.figure();
for i in range(1, 9):
print("Growth rate for mode number", i)
print(gradient(log(abs(fphi[:,34, 32, i]))))
plt.semilogy(((abs(fphi[:,34, 32, i]))), label = 'n=' + str(i * 5));
plt.legend(loc=2);
plt.xlabel('Time');
plt.savefig('image/plotmode.png');
plt.savefig('image/plotmode.eps');
plt.show(block=False);
plt.figure();
for i in range(1, 9):
plt.plot(abs(fphi[-1, :, 32, i]), label = 'n=' + str(i * 5));
plt.legend();
plt.xlabel('X index');
plt.savefig('image/plotmodeamp.png');
plt.savefig('image/plotmodeamp.eps');
plt.show(block=False);
plt.figure();
for i in range(1, 9):
plt.plot(old_div(abs(fphi[-1, :, 32, i]),abs(fphi[-1, :, 32, i]).max()), label = 'n=' + str(i * 5));
plt.legend();
plt.xlabel('X index');
plt.savefig('image/plotmodenorm.png');
plt.savefig('image/plotmodenorm.eps');
plt.show();
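# Hedged sketch (not part of the original BOUT++ example): for a linearly
# growing mode the amplitude behaves as |phi(t)| ~ exp(gamma*t), so the
# growth rate can also be estimated with a least-squares fit to log|phi|
# instead of point-wise gradients. Name and time step below are assumptions.
def estimate_growth_rate(amplitude, dt=1.0):
    t = arange(len(amplitude)) * dt
    gamma, _ = polyfit(t, log(abs(amplitude)), 1)  # slope of log-amplitude
    return gamma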
|
gpl-3.0
|
rudischilder/gr10_2
|
sw/airborne/test/ahrs/ahrs_utils.py
|
86
|
4923
|
#! /usr/bin/env python
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
print("\nBuilding ahrs")
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_" + ahrs_type] + build_opt
#print(args)
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print(" # " + i, end=' ')
print()
print("Running simulation")
print(" using traj " + str(traj_nb))
p = subprocess.Popen(args=["./run_ahrs_on_synth", str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print(" "+i, end=' ')
# print("\n")
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')]
mydescr = np.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print(" " + line, end=' ')
else:
fields = line.strip().split(' ')
#print(fields)
for i, number in enumerate(fields):
data[i].append(number)
print()
for i in xrange(len(mydescr)):
data[i] = np.cast[mydescr[i]](data[i])
return np.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, label, sim_res):
print("Plotting Results")
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=label)
    plt.ylabel('degrees')
plt.title('phi')
plt.legend()
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_ahrs, lsty)
plt.title('theta')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_ahrs, lsty)
plt.title('psi')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
    plt.ylabel('degrees/s')
plt.title('p')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
plt.title('q')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
plt.title('r')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
    plt.ylabel('degrees/s')
plt.xlabel('time in s')
plt.title('bp')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
plt.xlabel('time in s')
plt.title('bq')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
plt.xlabel('time in s')
plt.title('br')
if plot_true_state:
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_true, 'r--')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_true, 'r--')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_true, 'r--')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_true, 'r--')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_true, 'r--')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_true, 'r--')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_true, 'r--')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show()
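def _example_usage(traj_nb=1):
    # Hedged usage sketch (not from the original script): chaining the helpers
    # above. The AHRS type string and the empty build-option list are
    # illustrative placeholders, not values taken from Paparazzi.
    sim = run_simulation("ICQ", [], traj_nb)
    plot_simulation_results(True, 'b-', 'example', sim)
    show_plot()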
|
gpl-2.0
|
LeagueForHacker/Python-Prototypes-Demo
|
model/sms_spam_detection.py
|
1
|
9268
|
# -*- coding: UTF-8 -*-
"""
Description: SMS Spam Detection
Author: Wallace Huang
Date: 2019/7/22
Version: 1.0
"""
import os
import re
import string
from collections import Counter
import nltk
import pandas as pd
from nltk.corpus import stopwords
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attr):
self.attr_names = attr
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.rename(columns={"v1": "Type", "v2": "Text"})
return X[self.attr_names]
class AddExtraAttr(BaseEstimator, TransformerMixin):
def __init__(self, attr):
self.attr_name = attr
def fit(self, X, y=None):
return self
def transform(self, X):
X = X.values
X = pd.DataFrame(X, columns=[self.attr_name])
X['Length'] = X[self.attr_name].apply(len)
# findCapitalCount = lambda x: sum(map(str.isupper, x[self.attr_name].split()))
# X["CapitalCount"] = X.apply(findCapitalCount, axis=1)
# findWordCount = lambda x: len(x[self.attr_name].split())
# X["WordCount"] = X.apply(findWordCount, axis=1)
# X["CapitalRate"] = X["CapitalCount"] / X["WordCount"]
return X[["Length"]]
class FindCount(BaseEstimator, TransformerMixin):
def __init__(self, attr):
self.attr_name = attr
def fit(self, X, y=None):
return self
def transform(self, X):
Y = []
for index, row in X.iterrows():
row[self.attr_name] = re.sub(r'\d+(?:\.\d*(?:[eE]\d+))?', 'NUMBER', row[self.attr_name])
no_punc = [ch for ch in row[self.attr_name] if ch not in string.punctuation]
punc = [(" " + ch + " ") for ch in row[self.attr_name] if ch in string.punctuation]
word_list = "".join(no_punc + punc).split()
useful_words = [word.lower() for word in word_list if word.lower() not in stopwords.words("english")]
word_counts = Counter(useful_words)
stemmed_word_counts = Counter()
for word, count in word_counts.items():
stemmed_word = stemmer.stem(word)
stemmed_word_counts[stemmed_word] += count
word_counts = stemmed_word_counts
Y.append(word_counts)
return Y
class ConvertToVector(BaseEstimator, TransformerMixin):
def __init__(self, vocabulary_size=1000):
self.vocabulary_size = vocabulary_size
def fit(self, X, y=None):
total_count = Counter()
for word_count in X:
for word, count in word_count.items():
total_count[word] += count
most_common = total_count.most_common()[:self.vocabulary_size]
self.most_common_ = most_common
self.vocabulary_ = {word: index + 1 for index, (word, count) in enumerate(most_common)}
return self
def transform(self, X, y=None):
rows = []
cols = []
data = []
for row, word_count in enumerate(X):
for word, count in word_count.items():
rows.append(row)
cols.append(self.vocabulary_.get(word, 0))
data.append(count)
return csr_matrix((data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1))
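# Hedged illustration (not used by the pipeline below): how ConvertToVector
# turns word-count dictionaries into a sparse bag-of-words matrix. The toy
# Counters are made up for demonstration purposes.
def _example_vectorize():
    toy_counts = [Counter({'free': 2, 'prize': 1}), Counter({'meeting': 1})]
    vec = ConvertToVector(vocabulary_size=5).fit(toy_counts)
    return vec.transform(toy_counts).toarray()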
if __name__ == '__main__':
sample_file = '../resources/spam.csv'
if not os.access(sample_file, os.F_OK):
print('FileNotFoundError: %s not found.' % sample_file)
exit(-1)
else:
data = pd.read_csv(sample_file, encoding='latin-1', low_memory=True)
print(data.head(1))
# print(data.info)
unimportant_col = ["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"]
data.drop(unimportant_col, axis=1, inplace=True)
print(data.head(1))
data.rename(columns={"v1": "Type", "v2": "Text"}, inplace=True)
print(data.head(1))
train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)
train_data.info()
test_data.info()
print(train_data.describe())
print(train_data.groupby("Type").describe())
# train_data.Type.value_counts().plot.pie(autopct='%0.2f%%', shadow=True)
# plt.show()
train_data['Length'] = train_data['Text'].apply(len)
print(train_data.head(5))
# train_data.hist(column='Length', by='Type', bins=60, figsize=(12, 4))
# plt.show()
findCapitalCount = lambda x: sum(map(str.isupper, x['Text'].split()))
train_data["CapitalCount"] = train_data.apply(findCapitalCount, axis=1)
print(train_data.head(5))
# train_data.hist(column='CapitalCount', by='Type', bins=30, figsize=(12, 4))
# plt.show()
findWordCount = lambda x: len(x['Text'].split())
train_data["WordCount"] = train_data.apply(findWordCount, axis=1)
print(train_data.head(5))
train_data["CapitalRate"] = train_data["CapitalCount"] / train_data["WordCount"]
print(train_data.head(5))
# train_data.hist(column='CapitalRate',by='Type',bins=30,figsize=(12,4))
# plt.show()
stopwords.words("english")
stemmer = nltk.PorterStemmer()
for word in ("Computations", "Computation", "Computing", "Computed", "Compute", "Compulsive"):
print(word, "=>", stemmer.stem(word))
cat_attr = ["Text"]
main_pipeline = Pipeline([
('selector', DataFrameSelector(cat_attr)),
('find_count', FindCount(cat_attr[0])),
('convert_to_vector', ConvertToVector()),
])
extra_pipeline = Pipeline([
('selector', DataFrameSelector(cat_attr)),
('add_extra', AddExtraAttr(cat_attr[0])),
])
full_pipeline = FeatureUnion(transformer_list=[
('main_pipeline', main_pipeline),
('extra_pipeline', extra_pipeline),
])
train_prepared = full_pipeline.fit_transform(train_data)
y_train = train_data["Type"]
encoder = LabelEncoder()
type_encoded = encoder.fit_transform(y_train)
# Logistic Regression
log_clf = LogisticRegression(solver="liblinear", random_state=42)
score = cross_val_score(log_clf, train_prepared, type_encoded, cv=3, verbose=3)
score.mean()
# SGD
sgd_clf = SGDClassifier(random_state=42)
score = cross_val_score(sgd_clf, train_prepared, type_encoded, cv=3, verbose=3)
score.mean()
# MultinomialNB
mnb_clf = MultinomialNB()
score = cross_val_score(mnb_clf, train_prepared, type_encoded, cv=3, verbose=3)
score.mean()
# BernoulliNB
bnb_clf = BernoulliNB()
score = cross_val_score(bnb_clf, train_prepared, type_encoded, cv=3, verbose=3)
score.mean()
from sklearn.metrics import precision_score, recall_score
y_test = test_data["Type"]
encoder = LabelEncoder()
type_encoded = encoder.fit_transform(y_test)
X_test_transformed = full_pipeline.transform(test_data)
bnb_clf = BernoulliNB()
bnb_clf.fit(train_prepared, y_train)
y_pred = bnb_clf.predict(X_test_transformed)
encoder = LabelEncoder()
y_pred = encoder.fit_transform(y_pred)
print("Precision BernoulliNB: {:.2f}%".format(100 * precision_score(type_encoded, y_pred)))
print("Recall BernoulliNB: {:.2f}%\n".format(100 * recall_score(type_encoded, y_pred)))
mnb_clf = MultinomialNB()
mnb_clf.fit(train_prepared, y_train)
y_pred = mnb_clf.predict(X_test_transformed)
encoder = LabelEncoder()
y_pred = encoder.fit_transform(y_pred)
print("Precision MultinomialNB: {:.2f}%".format(100 * precision_score(type_encoded, y_pred)))
print("Recall MultinomialNB: {:.2f}%\n".format(100 * recall_score(type_encoded, y_pred)))
sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(train_prepared, y_train)
y_pred = sgd_clf.predict(X_test_transformed)
encoder = LabelEncoder()
y_pred = encoder.fit_transform(y_pred)
print("Precision SGD: {:.2f}%".format(100 * precision_score(type_encoded, y_pred)))
print("Recall SGD: {:.2f}%\n".format(100 * recall_score(type_encoded, y_pred)))
log_clf = LogisticRegression(solver="liblinear", random_state=42)
log_clf.fit(train_prepared, y_train)
y_pred = log_clf.predict(X_test_transformed)
encoder = LabelEncoder()
y_pred = encoder.fit_transform(y_pred)
print("Precision Logistic Regression: {:.2f}%".format(100 * precision_score(type_encoded, y_pred)))
print("Recall Logistic Regression: {:.2f}%".format(100 * recall_score(type_encoded, y_pred)))
|
gpl-2.0
|
vigilv/scikit-learn
|
sklearn/covariance/__init__.py
|
389
|
1157
|
"""
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
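# Hedged usage sketch (not part of this module): a typical call pattern for
# one of the estimators exported above.
#
#     from sklearn.covariance import LedoitWolf
#     import numpy as np
#     X = np.random.RandomState(0).randn(50, 5)
#     cov = LedoitWolf().fit(X).covariance_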
|
bsd-3-clause
|
miguelinux/ejemplos-opencv
|
python/ocv4/common.py
|
3
|
6679
|
#!/usr/bin/env python
'''
This module contains some common routines used by other samples.
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
from functools import reduce
import numpy as np
import cv2 as cv
# built-in modules
import os
import itertools as it
from contextlib import contextmanager
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
return str(self.__dict__)
def splitfn(fn):
path, fn = os.path.split(fn)
name, ext = os.path.splitext(fn)
return path, name, ext
def anorm2(a):
return (a*a).sum(-1)
def anorm(a):
return np.sqrt( anorm2(a) )
def homotrans(H, x, y):
xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
s = H[2, 0]*x + H[2, 1]*y + H[2, 2]
return xs/s, ys/s
def to_rect(a):
a = np.ravel(a)
if len(a) == 2:
a = (0, 0, a[0], a[1])
return np.array(a, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
src, dst = to_rect(src), to_rect(dst)
cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
tx, ty = dst[0] - src[0] * (cx, cy)
M = np.float64([[ cx, 0, tx],
[ 0, cy, ty],
[ 0, 0, 1]])
return M
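def _example_rect_mapping():
    # Hedged example (not part of the original sample): rect2rect_mtx composed
    # with homotrans maps a point from one rectangle into another; the centre
    # of a 100x100 rectangle lands on the centre of a 640x480 one.
    M = rect2rect_mtx((0, 0, 100, 100), (0, 0, 640, 480))
    return homotrans(M, 50, 50)  # -> (320.0, 240.0)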
def lookat(eye, target, up = (0, 0, 1)):
fwd = np.asarray(target, np.float64) - eye
fwd /= anorm(fwd)
right = np.cross(fwd, up)
right /= anorm(right)
down = np.cross(fwd, right)
R = np.float64([right, down, fwd])
tvec = -np.dot(R, eye)
return R, tvec
def mtx2rvec(R):
w, u, vt = cv.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
axis = np.cross(vt[0], vt[1])
return axis * np.arctan2(s, c)
def draw_str(dst, target, s):
x, y = target
cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv.LINE_AA)
cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
self.prev_pt = None
self.windowname = windowname
self.dests = dests
self.colors_func = colors_func
self.dirty = False
self.show()
cv.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv.EVENT_LBUTTONDOWN:
self.prev_pt = pt
elif event == cv.EVENT_LBUTTONUP:
self.prev_pt = None
if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
# palette data from matplotlib/_cm.py
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
data = cmap_data[name]
xs = np.linspace(0.0, 1.0, n)
channels = []
eps = 1e-6
for ch_name in ['blue', 'green', 'red']:
ch_data = data[ch_name]
xp, yp = [], []
for x, y1, y2 in ch_data:
xp += [x, x+eps]
yp += [y1, y2]
ch = np.interp(xs, xp, yp)
channels.append(ch)
return np.uint8(np.array(channels).T*255)
def nothing(*arg, **kw):
pass
def clock():
return cv.getTickCount() / cv.getTickFrequency()
@contextmanager
def Timer(msg):
print(msg, '...',)
start = clock()
try:
yield
finally:
print("%.2f ms" % ((clock()-start)*1000))
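def _example_timer():
    # Hedged usage sketch (illustrative only): timing an OpenCV call with the
    # Timer context manager defined above.
    with Timer('resize'):
        cv.resize(np.zeros((480, 640), np.uint8), (320, 240))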
class StatValue:
def __init__(self, smooth_coef = 0.5):
self.value = None
self.smooth_coef = smooth_coef
def update(self, v):
if self.value is None:
self.value = v
else:
c = self.smooth_coef
self.value = c * self.value + (1.0-c) * v
class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
return
if self.drag_start:
if flags & cv.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
self.drag_rect = None
if x1-x0 > 0 and y1-y0 > 0:
self.drag_rect = (x0, y0, x1, y1)
else:
rect = self.drag_rect
self.drag_start = None
self.drag_rect = None
if rect:
self.callback(rect)
def draw(self, vis):
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
'''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
args = [iter(iterable)] * n
if PY3:
output = it.zip_longest(fillvalue=fillvalue, *args)
else:
output = it.izip_longest(fillvalue=fillvalue, *args)
return output
def mosaic(w, imgs):
'''Make a grid from images.
w -- number of grid columns
imgs -- images (must have same size and format)
'''
imgs = iter(imgs)
if PY3:
img0 = next(imgs)
else:
img0 = imgs.next()
pad = np.zeros_like(img0)
imgs = it.chain([img0], imgs)
rows = grouper(w, imgs, pad)
return np.vstack(map(np.hstack, rows))
def getsize(img):
h, w = img.shape[:2]
return w, h
def mdot(*args):
return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv.circle(vis, (int(x), int(y)), 2, color)
|
bsd-3-clause
|
ChinaQuants/zipline
|
tests/test_munge.py
|
34
|
1794
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal
from unittest import TestCase
from zipline.utils.munge import bfill, ffill
class MungeTests(TestCase):
def test_bfill(self):
# test ndim=1
N = 100
s = pd.Series(np.random.randn(N))
mask = random.sample(range(N), 10)
s.iloc[mask] = np.nan
correct = s.bfill().values
test = bfill(s.values)
assert_almost_equal(correct, test)
# test ndim=2
df = pd.DataFrame(np.random.randn(N, N))
df.iloc[mask] = np.nan
correct = df.bfill().values
test = bfill(df.values)
assert_almost_equal(correct, test)
def test_ffill(self):
# test ndim=1
N = 100
s = pd.Series(np.random.randn(N))
mask = random.sample(range(N), 10)
s.iloc[mask] = np.nan
correct = s.ffill().values
test = ffill(s.values)
assert_almost_equal(correct, test)
# test ndim=2
df = pd.DataFrame(np.random.randn(N, N))
df.iloc[mask] = np.nan
correct = df.ffill().values
test = ffill(df.values)
assert_almost_equal(correct, test)
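# Hedged illustration (not one of the original tests): on a one-dimensional
# array, forward fill propagates the last valid value and backward fill the
# next one, e.g. ffill([1, nan, 3]) -> [1, 1, 3] and
# bfill([1, nan, 3]) -> [1, 3, 3].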
|
apache-2.0
|
danlwo/Hadoop-Spark-Python-Log-Parser
|
def_400_visualizations/def_400_pickle2CSV_distCount/def_400_pickle2CSV_distCount.py
|
1
|
4777
|
# Christina CJ Chen & Dan Lwo from Logitech
# python 2.7.10 $ spark-2.1.1-bin-hadoop2.7
# --- CAUTION! SAFETY RISK! ---
import credencialInfo
# --- Make sure env. variables set! ---
import findspark
findspark.init()
print '--- [INFO] FINDSPARK session successfully FINISHED. ---'
# --- Import necessary modules ---
import boto3
import csv
import geoip2.database
import gzip
import itertools
import logging
import numpy as np
import os
import pandas as pd
import pickle
import smtplib
import sys
import traceback
from operator import add
from os.path import expanduser
from pyspark import SparkContext, SparkConf
from pyspark import SparkFiles
# --- Functions for save / load pickle files ---
def loadPickle(obj):
with open(obj) as f:
pickleContent = pickle.load(f)
return pickleContent
def savePickle(filename, save_obj):
with open(filename+'.pickle','w') as f:
pickle.dump(save_obj,f)
# --- Walk through S3 bucket for needed source data file ---
def awsS3FileWalker(srcDataBucket,year,month):
s3 = boto3.resource('s3')
bucket = s3.Bucket(srcDataBucket)
executionTimeRange = year+'_'+month
for obj in bucket.objects.filter(Prefix = executionTimeRange):
# print '--- [INFO] Find: ',obj
srcFileCollection.append(obj.key)
return srcFileCollection
# --- Group source data files by hour and sort ---
def fileGrouper(srcFileCollection):
lines = sc.parallelize(srcFileCollection)
groupedFileList = lines.map(lambda x:(x[:10],x)).groupByKey().map(lambda x:(x[0], list(x[1]))).sortBy(lambda x:x[0]).collect()
return groupedFileList
def canaryMail(errMsg, sender, senderpwd, receiver):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(sender,senderpwd)
msg = errMsg
server.sendmail(sender,receiver,msg)
server.quit()
# --- Functions for save / load files to / from AWS S3---
def s3Uploader(outputbucket, filename):
s3 = boto3.resource('s3')
    print '--- [INFO] Now uploading:',filename,'to',outputbucket,'! ---'
    s3.meta.client.upload_file(filename,outputbucket,filename)
    print '--- [INFO] Now deleting:',filename
os.remove(filename)
def s3Downloader(srcDataBucket,s3_obj_key,download_obj_filename):
s3 = boto3.resource('s3')
bucket = s3.Bucket(srcDataBucket)
bucket.download_file(s3_obj_key, download_obj_filename)
if __name__ == "__main__":
# --- Start main program here ---
# --- Set global variables ---
year = str(sys.argv[1])
month = str(sys.argv[2])
keyID = credencialInfo.keyID()
key = credencialInfo.key()
srcDataBucket = credencialInfo.srcDataBucket()
outputbucket = credencialInfo.outputbucket()
sender = credencialInfo.sender()
senderpwd = credencialInfo.senderpwd()
receiver = credencialInfo.receiver()
sc = SparkContext.getOrCreate()
    print '--- [INFO] Execution parameters successfully set! ---'
# --- Locate & group & sort source data files ---
srcFileCollection = []
groupedFileList = []
    print '--- [INFO] Now retrieving source data files from s3://',srcDataBucket,' ---'
    srcFileCollection = awsS3FileWalker(srcDataBucket, year, month)
    print '--- [INFO] Successfully retrieved file list of ',month,' in ',year,' from ',srcDataBucket,' ---'
    print '--- [INFO] Now regrouping and sorting......'
    groupedFileList = fileGrouper(srcFileCollection)
    print '--- [INFO] Successfully regrouped and sorted file list of ',month,' in ',year,' ---'
# --- Core loop here ---
dist_user_trans_collection = []
for date, hour in groupedFileList:
everymonth_day = date[8:10]
for h in hour:
everyday_hour = h[11:13]
            print '--- [INFO] Now processing: ',h
s3Downloader(srcDataBucket,h,h)
preRenderPickle = loadPickle(h)
flat = sc.parallelize(preRenderPickle)
unique_id_count = flat.map(lambda x:x[0]).distinct().count()
id_trans_rdd = flat.reduceByKey(add).sortBy(lambda x:-x[1])
total_trans_count = id_trans_rdd.map(lambda x:x[1]).sum()
dist_user_trans_collection.append([year,month,everymonth_day,everyday_hour,unique_id_count,total_trans_count])
os.remove(h)
# --- Write & save & upload & wipe result .csv file ---
df = pd.DataFrame(dist_user_trans_collection)
filename = year+'_'+ month+'_distUserTransList.csv'
df.to_csv(filename,index=False,header=['year','month','day','hour','unique_id_count','total_trans_count'])
s3Uploader(outputbucket,filename)
# --- Send ending msg ---
completeMsg = '--- [INFO] Session successfully executed: '+str(year)+' '+str(month)+' def_400_pickle2CSV_distCount ---'
canaryMail(completeMsg,sender,senderpwd,receiver)
print completeMsg
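# Hedged sketch (not part of the job): what unique_id_count and
# total_trans_count represent for a single hour, shown on a made-up in-memory
# list of (id, count) pairs instead of an RDD:
#     pairs = [('user_a', 3), ('user_b', 1), ('user_a', 2)]
#     unique_id_count = len(set(uid for uid, _ in pairs))   # -> 2
#     total_trans_count = sum(cnt for _, cnt in pairs)      # -> 6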
|
mit
|
blueburningcoder/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_tkagg.py
|
69
|
24593
|
# Todd Miller [email protected]
from __future__ import division
import os, sys, math
import Tkinter as Tk, FileDialog
import tkagg # Paint image to Tk photo blitter extension
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def round(x):
return int(math.floor(x+0.5))
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
import tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
def show():
"""
    Show all the figures and enter the Tk mainloop
This should be the last line of your script. This function sets
interactive mode to True, as detailed on
http://matplotlib.sf.net/interactive.html
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
import matplotlib
matplotlib.interactive(True)
if rcParams['tk.pythoninspect']:
os.environ['PYTHONINSPECT'] = '1'
if show._needmain:
Tk.mainloop()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
_focus = windowing.FocusManager()
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Tk.Tk()
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=4)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w/2, h/2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows)
self._master = master
self._tkcanvas.focus_set()
# a dict from func-> cbook.Scheduler threads
self.sourced = dict()
# call the idle handler
def on_idle(*ignore):
self.idle_event()
return True
# disable until you figure out how to handle threads and interrupts
#t = cbook.Idle(on_idle)
#self._tkcanvas.after_idle(lambda *ignore: t.start())
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=width, height=height)
self._tkcanvas.create_image(width/2,height/2,image=self._tkphoto)
self.resize_event()
self.show()
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
d = self._idle
self._idle = False
def idle_draw(*args):
self.draw()
self._idle = True
if d: self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, guiEvent=event)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = -1
elif num==5: step = +1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val<256:
key = chr(val)
else:
key = None
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
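def _example_embed():
    # Hedged usage sketch (not part of the backend module, needs a display):
    # embedding a FigureCanvasTkAgg in a plain Tk window, the typical pattern
    # for this backend.
    root = Tk.Tk()
    fig = Figure(figsize=(4, 3))
    fig.add_subplot(111).plot([0, 1, 2], [0, 1, 4])
    canvas = FigureCanvasTkAgg(fig, master=root)
    canvas.show()
    canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
    root.mainloop()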
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.window.wm_title("Figure %d" % num)
self.canvas = canvas
self._num = num
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window.minsize(int(w*3/4),int(h*3/4))
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self.window )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2TkAgg( canvas, self.window )
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.show()
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
if not self._shown: self.canvas._tkcanvas.bind("<Destroy>", destroy)
_focus = windowing.FocusManager()
if not self._shown:
self.window.deiconify()
# anim.py requires this
if sys.platform=='win32' : self.window.update()
else:
self.canvas.draw()
self._shown = True
def destroy(self, *args):
if Gcf.get_num_fig_managers()==0 and not matplotlib.is_interactive():
if self.window is not None:
self.window.quit()
if self.window is not None:
#self.toolbar.destroy()
self.window.destroy()
pass
self.window = None
def set_window_title(self, title):
self.window.wm_title(title)
class AxisMenu:
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar(Tk.Frame):
"""
    Public attributes
    canvas - the FigureCanvas (a FigureCanvasTkAgg instance)
    window - the Tk window
"""
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
xmin, xmax = canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bLeft = self._Button(
text="Left", file="stock_left.ppm",
command=lambda x=-1: self.panx(x))
self.bRight = self._Button(
text="Right", file="stock_right.ppm",
command=lambda x=1: self.panx(x))
self.bZoomInX = self._Button(
text="ZoomInX",file="stock_zoom-in.ppm",
command=lambda x=1: self.zoomx(x))
self.bZoomOutX = self._Button(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=lambda x=-1: self.zoomx(x))
self.bUp = self._Button(
text="Up", file="stock_up.ppm",
command=lambda y=1: self.pany(y))
self.bDown = self._Button(
text="Down", file="stock_down.ppm",
command=lambda y=-1: self.pany(y))
self.bZoomInY = self._Button(
text="ZoomInY", file="stock_zoom-in.ppm",
command=lambda y=1: self.zoomy(y))
self.bZoomOutY = self._Button(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=lambda y=-1: self.zoomy(y))
self.bSave = self._Button(
text="Save", file="stock_save_as.ppm",
command=self.save_figure)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, direction):
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
def pany(self, direction):
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
def zoomx(self, direction):
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
def zoomy(self, direction):
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
def save_figure(self):
fs = FileDialog.SaveFileDialog(master=self.window,
title='Save the figure')
try:
self.lastDir
except AttributeError:
self.lastDir = os.curdir
fname = fs.go(dir_or_file=self.lastDir) # , pattern="*.png")
if fname is None: # Cancel
return
self.lastDir = os.path.dirname(fname)
try:
self.canvas.print_figure(fname)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_tkpaint(msg)
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
if not hasattr(self, "omenu"):
self.set_active(range(naxes))
self.omenu = AxisMenu(master=self, naxes=naxes)
else:
self.omenu.adjust(naxes)
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
    Public attributes
    canvas - the FigureCanvas (a FigureCanvasTkAgg instance)
    window - the Tk window
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bHome = self._Button( text="Home", file="home.ppm",
command=self.home)
self.bBack = self._Button( text="Back", file="back.ppm",
command = self.back)
self.bForward = self._Button(text="Forward", file="forward.ppm",
command = self.forward)
self.bPan = self._Button( text="Pan", file="move.ppm",
command = self.pan)
self.bZoom = self._Button( text="Zoom",
file="zoom_to_rect.ppm",
command = self.zoom)
self.bsubplot = self._Button( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots)
self.bsave = self._Button( text="Save", file="filesave.ppm",
command = self.save_figure)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self):
from tkFileDialog import asksaveasfilename
from tkMessageBox import showerror
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
fname = asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes = tk_filetypes,
defaultextension = self.canvas.get_default_filetype()
)
if fname == "" or fname == ():
return
else:
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
            except Exception as e:
showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
FigureManager = FigureManagerTkAgg
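# A minimal embedding sketch (illustration only, not part of the original
# backend module; `_example_embed_in_tk` is a hypothetical helper). It shows
# how the FigureCanvasTkAgg and NavigationToolbar2TkAgg classes defined above
# are typically wired into a plain Tk window.
def _example_embed_in_tk():
    root = Tk.Tk()
    fig = Figure(figsize=(5, 4), dpi=100)
    fig.add_subplot(111).plot([1, 2, 3], [1, 4, 9])
    canvas = FigureCanvasTkAgg(fig, master=root)
    canvas.show()
    canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
    toolbar = NavigationToolbar2TkAgg(canvas, root)
    toolbar.update()
    Tk.mainloop()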
|
agpl-3.0
|
madjelan/scikit-learn
|
sklearn/feature_extraction/hashing.py
|
183
|
6155
|
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.bool_ or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
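# A minimal usage sketch (illustration only, not part of the original module;
# `_example_string_hashing` is a hypothetical helper). With input_type="string"
# each token is treated as (token, 1), so repeated tokens accumulate in their
# hashed column (up to the sign flip mentioned in the docstring).
def _example_string_hashing():
    h = FeatureHasher(n_features=8, input_type="string")
    X = h.transform([["dog", "cat", "dog"], ["run"]])
    return X.toarray()  # dense 2 x 8 array of signed token counts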
|
bsd-3-clause
|
OpenDA-Association/OpenDA
|
course/exercise_black_box_calibration_polution_NOT_WORKING/plot_movie.py
|
1
|
1300
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Plot movie of model simulation output.
Uses directly the output of the model, not the output from OpenDA
@author: verlaanm
"""
import shutil
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
#load data
import reactive_pollution_model_original as ori
import reactive_pollution_model_truth as truth
plt.close("all")
f,ax = plt.subplots(2,1)
plt.ion()
# split sources and outputs based on substance
stypeisone=np.array(ori.source_substance)==1
stypeistwo=np.array(ori.source_substance)==2
sloc1=np.array(ori.source_locations)[stypeisone]
sloc2=np.array(ori.source_locations)[stypeistwo]
otypeisone=np.array(ori.output_substance)==1
otypeistwo=np.array(ori.output_substance)==2
oloc1=np.array(ori.output_locations)[otypeisone]
oloc2=np.array(ori.output_locations)[otypeistwo]
for i in ori.c1_map.keys():
ax[0].clear();
ax[1].clear();
ax[0].plot(ori.c1_map[i],'b')
ax[0].plot(truth.c1_map[i],'k')
ax[0].plot(oloc1,0*oloc1+1,'*')
ax[0].plot(sloc1,0*sloc1+1,'d')
ax[0].set_ylabel("c_1")
ax[1].plot(ori.c2_map[i],'b')
ax[1].plot(truth.c2_map[i],'k')
ax[1].plot(oloc2,0*oloc2+1,'*')
ax[1].plot(sloc2,0*sloc2+1,'d')
    ax[1].set_ylabel("c_2")
plt.draw()
sleep(0.1)
plt.savefig("figure_2.png")
|
lgpl-3.0
|
maxrosan/NS-3-support-for-OBS
|
src/core/examples/sample-rng-plot.py
|
188
|
1246
|
# -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
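# A hedged cross-check (illustration only, not part of the original example):
# ns.core.NormalVariable takes (mean, variance), so the equivalent numpy draw
# uses sigma = sqrt(225) = 15. Its histogram should closely match the one above.
x_np = np.random.normal(loc=100.0, scale=15.0, size=10000)
plt.hist(x_np, 50, normed=1, facecolor='b', alpha=0.5)
plt.title('numpy reference histogram')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()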
|
gpl-2.0
|
potash/scikit-learn
|
sklearn/model_selection/_split.py
|
7
|
61646
|
"""
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav R V <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint
from ..gaussian_process.kernels import Kernel as GPKernel
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
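# A minimal sketch (illustration only, not part of the original module;
# `_ExampleBlockSplit` is a hypothetical class). Subclasses of BaseCrossValidator
# only need `_iter_test_indices` (or `_iter_test_masks`) and `get_n_splits`;
# `split` is inherited from the base class.
class _ExampleBlockSplit(BaseCrossValidator):
    """Yield contiguous, non-overlapping test blocks of a fixed size."""
    def __init__(self, block_size=2):
        self.block_size = block_size
    def _iter_test_indices(self, X, y=None, groups=None):
        n_samples = _num_samples(X)
        for start in range(0, n_samples, self.block_size):
            yield np.arange(start, min(start + self.block_size, n_samples))
    def get_n_splits(self, X=None, y=None, groups=None):
        if X is None:
            raise ValueError("The X parameter should not be None")
        return int(ceil(_num_samples(X) / float(self.block_size)))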
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
        Takes class information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_splits)``; the last one has
    the remainder.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedKFold, self).split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
In each split, test indices must be higher than before, and thus shuffling
    within the cross-validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> lol = LeaveOneGroupOut()
>>> lol.get_n_splits(X, y, groups)
2
>>> print(lol)
LeaveOneGroupOut()
>>> for train_index, test_index in lol.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
# We make a copy of groups to avoid side-effects during iteration
groups = np.array(groups, copy=True)
unique_groups = np.unique(groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the groups while the latter uses samples
all assigned the same groups.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpl = LeavePGroupsOut(n_groups=2)
>>> lpl.get_n_splits(X, y, groups)
3
>>> print(lpl)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpl.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = np.array(groups, copy=True)
unique_groups = np.unique(groups)
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
'''Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
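# A minimal usage sketch (illustration only, not part of the original module;
# `_example_group_shuffle_split` is a hypothetical helper). All samples sharing
# a group value always end up on the same side of a split.
def _example_group_shuffle_split():
    X = np.arange(8).reshape(4, 2)
    groups = np.array([1, 1, 2, 2])
    gss = GroupShuffleSplit(n_splits=2, test_size=0.5, random_state=0)
    # each split holds out every sample of exactly one group
    return list(gss.split(X, groups=groups))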
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
    NOTE: This does not take into account the number of samples, which is known
    only at split time.
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check whether the train/test sizes are meaningful with
    respect to the size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
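# A small worked illustration (not part of the original module;
# `_example_validate_shuffle_split` is a hypothetical helper). With
# n_samples=10, test_size=0.25 and train_size=None: n_test = ceil(2.5) = 3
# and n_train = 10 - 3 = 7.
def _example_validate_shuffle_split():
    return _validate_shuffle_split(10, 0.25, None)  # -> (7, 3)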
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = cv
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv) # Both iterables and old-cv objects support len
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
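# A minimal usage sketch (illustration only, not part of the original module;
# `_example_cv_iterable_wrapper` is a hypothetical helper). A plain list of
# (train, test) index pairs gains the split()/get_n_splits() interface.
def _example_cv_iterable_wrapper():
    folds = [(np.array([0, 1]), np.array([2, 3])),
             (np.array([2, 3]), np.array([0, 1]))]
    cv = _CVIterableWrapper(folds)
    return cv.get_n_splits(), list(cv.split())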
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
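# A minimal usage sketch (illustration only, not part of the original module;
# `_example_check_cv` is a hypothetical helper). An integer is resolved to
# StratifiedKFold for classification targets and to KFold otherwise.
def _example_check_cv():
    y = np.array([0, 1, 0, 1, 0, 1])
    cv_clf = check_cv(3, y, classifier=True)     # StratifiedKFold(n_splits=3)
    cv_other = check_cv(3, y, classifier=False)  # KFold(n_splits=3)
    return cv_clf, cv_other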
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
    one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the groups array.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
|
bsd-3-clause
|
smartscheduling/scikit-learn-categorical-tree
|
doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py
|
256
|
2406
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
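# One possible completion of the TASK items above -- a sketch, not the
# canonical tutorial solution; the vectorizer and classifier parameters
# below are assumptions:
pipeline = Pipeline([
    ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    ('clf', LinearSVC(C=1000)),
])
parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# cross-validated scores for each parameter combination explored
for params, mean_score, scores in grid_search.grid_scores_:
    print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
y_predicted = grid_search.predict(docs_test)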
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
|
bsd-3-clause
|
Jailander/COSMOS
|
penetrometer_tools/scripts/data_drawer.py
|
1
|
1948
|
#!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import String
import time
from auto_soil_probe.msg import Controller
import matplotlib.pyplot as plt
import numpy as np
class get_penetromter_data(object):
def __init__(self) :
self.n60s=0
self.wpn=0
self.wpfdata=[]
self.wptdata=[]
filename= "Waypoint"+str(self.wpn)+".log"
rospy.on_shutdown(self.on_shutdown)
self.fileo = open(filename, 'w+')
rospy.Subscriber("/auto_soil_probe/data", Controller, self.data_callback)
rospy.spin()
#self.fileo.close()
def data_callback(self, msg):
force= msg.z_axis_force
targ = msg.z_axis_target_position
if targ <= 400.0:
self.n60s+=1
else:
self.n60s=0
if self.n60s < 250:
self.wpfdata.append(force)
self.wptdata.append(msg.header.stamp.secs)
txt = str(msg.header.stamp.secs) + ', ' + str(force) + ', ' + str(msg.z_axis_target_position) + ', ' + str(msg.z_axis_speed) + '\n'
print(txt)
self.fileo.write(txt)
if self.n60s == 250:
self.create_graph()
self.wpn+=1
self.fileo.close()
filename= "Waypoint"+str(self.wpn)+".log"
self.fileo = open(filename, 'w+')
self.wpfdata=[]
self.wptdata=[]
print(filename)
def create_graph(self):
plt.plot(self.wptdata, self.wpfdata)
plt.xlabel('time (s)')
plt.ylabel('current')
title="Waypoint"+str(self.wpn)+".png"
plt.title(title)
plt.grid(True)
plt.savefig(title)
plt.clf()
def on_shutdown(self):
self.create_graph()
self.fileo.close()
if __name__ == '__main__':
rospy.init_node('data_logger')
server = get_penetromter_data()
|
mit
|
xuewei4d/scikit-learn
|
benchmarks/bench_plot_nmf.py
|
19
|
15492
|
"""
Benchmarks of Non-Negative Matrix Factorization
"""
# Authors: Tom Dupre la Tour (benchmark)
# Chih-Jen Lin (original projected gradient NMF implementation)
# Anthony Di Franco (projected gradient, Python and NumPy port)
# License: BSD 3 clause
from time import time
import sys
import warnings
import numbers
import numpy as np
import matplotlib.pyplot as plt
from joblib import Memory
import pandas
from sklearn.utils._testing import ignore_warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.decomposition._nmf import _initialize_nmf
from sklearn.decomposition._nmf import _beta_divergence
from sklearn.decomposition._nmf import _check_init
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot, squared_norm
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, check_non_negative
mem = Memory(cachedir='.', verbose=0)
###################
# Start of _PGNMF #
###################
# This class implements a projected gradient solver for the NMF.
# The projected gradient solver was removed from scikit-learn in version 0.19,
# and a simplified copy is used here for comparison purpose only.
# It is not tested, and it may change or disappear without notice.
def _norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return np.sqrt(squared_norm(x))
def _nls_subproblem(X, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow finding a better step size but lead to a longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
https://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtX = safe_sparse_dot(W.T, X)
WtW = np.dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtX
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.",
ConvergenceWarning)
return H, grad, n_iter
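# A minimal usage sketch for the solver above (illustrative only; the random
# shapes and tolerances are assumptions, and this helper is never invoked by
# the benchmark itself).
def _nls_subproblem_example():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(30, 20))   # data matrix, kept non-negative
    W = np.abs(rng.randn(30, 5))    # fixed factor
    H0 = np.abs(rng.randn(5, 20))   # initial guess for the free factor
    H, grad, n_iter = _nls_subproblem(X, W, H0, tol=1e-4, max_iter=200)
    return H, n_iter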
def _fit_projected_gradient(X, W, H, tol, max_iter, nls_max_iter, alpha,
l1_ratio):
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
Wt, gradWt, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W, gradW = Wt.T, gradWt.T
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
Wt, _, _ = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W = Wt.T
return W, H, n_iter
class _PGNMF(NMF):
"""Non-Negative Matrix Factorization (NMF) with projected gradient solver.
This class is private and for comparison purpose only.
It may change or disappear without notice.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., nls_max_iter=10):
super().__init__(
n_components=n_components, init=init, solver=solver, tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio)
self.nls_max_iter = nls_max_iter
def fit(self, X, y=None, **params):
self.fit_transform(X, **params)
return self
def transform(self, X):
check_is_fitted(self)
H = self.components_
W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
return W
def inverse_transform(self, W):
check_is_fitted(self)
return np.dot(W, self.components_)
def fit_transform(self, X, y=None, W=None, H=None):
W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if (not isinstance(n_components, numbers.Integral) or
n_components <= 0):
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if (not isinstance(self.max_iter, numbers.Integral) or
self.max_iter < 0):
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
# check W and H, or initialize them
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=self.init,
random_state=self.random_state)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(
X, W, H, self.tol, self.max_iter, self.nls_max_iter,
self.alpha, self.l1_ratio)
else: # transform
Wt, _, n_iter = _nls_subproblem(X.T, H.T, W.T, self.tol,
self.nls_max_iter,
alpha=self.alpha,
l1_ratio=self.l1_ratio)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it"
" to improve convergence." % self.max_iter,
ConvergenceWarning)
return W, H, n_iter
#################
# End of _PGNMF #
#################
def plot_results(results_df, plot_name):
if results_df is None:
return None
plt.figure(figsize=(16, 6))
colors = 'bgr'
markers = 'ovs'
ax = plt.subplot(1, 3, 1)
for i, init in enumerate(np.unique(results_df['init'])):
plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
for j, method in enumerate(np.unique(results_df['method'])):
mask = np.logical_and(results_df['init'] == init,
results_df['method'] == method)
selected_items = results_df[mask]
plt.plot(selected_items['time'], selected_items['loss'],
color=colors[j % len(colors)], ls='-',
marker=markers[j % len(markers)],
label=method)
plt.legend(loc=0, fontsize='x-small')
plt.xlabel("Time (s)")
plt.ylabel("loss")
plt.title("%s" % init)
plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in the arguments to avoid hashing X
@mem.cache(ignore=['X', 'W0', 'H0'])
def bench_one(name, X, W0, H0, X_shape, clf_type, clf_params, init,
n_components, random_state):
W = W0.copy()
H = H0.copy()
clf = clf_type(**clf_params)
st = time()
W = clf.fit_transform(X, W=W, H=H)
end = time()
H = clf.components_
this_loss = _beta_divergence(X, W, H, 2.0, True)
duration = end - st
return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
start = time()
results = []
for name, clf_type, iter_range, clf_params in clfs:
print("Training %s:" % name)
for rs, init in enumerate(('nndsvd', 'nndsvdar', 'random')):
print(" %s %s: " % (init, " " * (8 - len(init))), end="")
W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)
for max_iter in iter_range:
clf_params['alpha'] = alpha
clf_params['l1_ratio'] = l1_ratio
clf_params['max_iter'] = max_iter
clf_params['tol'] = tol
clf_params['random_state'] = rs
clf_params['init'] = 'custom'
clf_params['n_components'] = n_components
this_loss, duration = bench_one(name, X, W, H, X.shape,
clf_type, clf_params,
init, n_components, rs)
init_name = "init='%s'" % init
results.append((name, this_loss, duration, init_name))
# print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
print(".", end="")
sys.stdout.flush()
print(" ")
# Use a pandas dataframe to organize the results
results_df = pandas.DataFrame(results,
columns="method loss time init".split())
print("Total time = %0.3f sec\n" % (time() - start))
# plot the results
plot_results(results_df, plot_name)
return results_df
def load_20news():
print("Loading 20 newsgroups dataset")
print("-----------------------------")
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data)
return tfidf
def load_faces():
print("Loading Olivetti face dataset")
print("-----------------------------")
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces(shuffle=True)
return faces.data
def build_clfs(cd_iters, pg_iters, mu_iters):
clfs = [("Coordinate Descent", NMF, cd_iters, {'solver': 'cd'}),
("Projected Gradient", _PGNMF, pg_iters, {'solver': 'pg'}),
("Multiplicative Update", NMF, mu_iters, {'solver': 'mu'}),
]
return clfs
if __name__ == '__main__':
alpha = 0.
l1_ratio = 0.5
n_components = 10
tol = 1e-15
# first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
plot_name = "20 Newsgroups sparse dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 6)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_20news = load_20news()
run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)
# second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
plot_name = "Olivetti Faces dense dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 12)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_faces = load_faces()
run_bench(X_faces, clfs, plot_name, n_components, tol, alpha, l1_ratio,)
plt.show()
|
bsd-3-clause
|
tillschumann/nest-simulator
|
examples/nest/plot_tsodyks_depr_fac.py
|
17
|
1135
|
# -*- coding: utf-8 -*-
#
# plot_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
dt = 0.1 # time resolution
nbins = 1000
N = 500 # number of neurons
vm = load('voltmeter-4-0.dat')
figure(1)
clf()
plot(vm[:, 0], vm[:, 1], 'r')
xlabel('time / ms')
ylabel('$V_m [mV]$')
savefig('test_tsodyks_depressing.png')
plot_spikes()
show()
|
gpl-2.0
|
anurag313/scikit-learn
|
sklearn/feature_extraction/tests/test_text.py
|
110
|
34127
|
from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error defaults to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are highly frequent across the complete corpus are likely
# to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params = '_invalid_analyzer_type_'
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset, all candidates explored by the grid search converge
# to 100% accuracy models, so the unigram representation is the one retained
# as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset, all candidates explored by the grid search converge
# to 100% accuracy models, so the unigram representation is the one retained
# as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
|
bsd-3-clause
|
videlec/sage-flatsurf
|
flatsurf/geometry/similarity_surface.py
|
1
|
105268
|
r"""
Similarity surfaces.
"""
from __future__ import absolute_import, print_function, division
from six.moves import range, map, filter, zip
from six import iteritems
import itertools
from sage.misc.cachefunc import cached_method
from sage.misc.sage_unittest import TestSuite
from sage.structure.sage_object import SageObject
from sage.rings.infinity import Infinity
from sage.rings.all import ZZ, QQ, AA, RIF, RR, NumberField
from sage.modules.free_module_element import vector
from sage.matrix.constructor import matrix, identity_matrix
from sage.modules.free_module import VectorSpace
from .matrix_2x2 import (is_similarity,
homothety_rotation_decomposition,
similarity_from_vectors,
rotation_matrix_angle,
is_cosine_sine_of_rational)
from .similarity import SimilarityGroup
from .polygon import ConvexPolygons, wedge_product, triangulate, build_faces
from .surface import Surface, Surface_dict, Surface_list, LabelComparator
from .surface_objects import Singularity, SaddleConnection, SurfacePoint
from .circle import Circle
ZZ_1 = ZZ.one()
ZZ_2 = ZZ_1 + ZZ_1
class SimilaritySurface(SageObject):
r"""
An oriented surface built from a set of polygons and edges identified with
similarities (i.e. compositions of homotheties, rotations and translations).
Each polygon is identified with a unique key (its label). The choice of the
label of the polygons is done at startup. If the set is finite then by
default the labels are the first non-negative integers 0,1,...
The edges are labeled by a pair ``(polygon label, edge number)``.
EXAMPLES:
The easiest way to construct a similarity surface is to use the pre-built
constructions from
:class:`flatsurf.geometry.similarity_surface_generators.SimilaritySurfaceGenerators`::
sage: from flatsurf import polygons, similarity_surfaces
sage: P = polygons(vertices=[(0,0), (2,0), (1,4), (0,5)])
sage: similarity_surfaces.self_glued_polygon(P)
HalfTranslationSurface built from 1 polygon
The second way is to build a surface (using e.g. :class:`flatsurf.geometry.surface.Surface_list`)
and then use this surface as an argument for :class:`SimilaritySurface`::
sage: from flatsurf.geometry.similarity_surface import SimilaritySurface
sage: from flatsurf.geometry.surface import Surface_list
sage: P = polygons(vertices=[(0,0), (1,0), (1,1), (0,1)])
sage: Stop = Surface_list(QQ)
sage: Stop.add_polygon(P)
0
sage: Stop.add_polygon(2*P)
1
sage: Stop.add_polygon(3*P)
2
sage: Stop.set_edge_pairing(0, 1, 1, 3)
sage: Stop.set_edge_pairing(0, 0, 2, 2)
sage: Stop.set_edge_pairing(0, 2, 2, 0)
sage: Stop.set_edge_pairing(0, 3, 1, 1)
sage: Stop.set_edge_pairing(1, 2, 2, 1)
sage: Stop.set_edge_pairing(1, 0, 2, 3)
sage: S = SimilaritySurface(Stop)
sage: S
SimilaritySurface built from 3 polygons
To perform a sanity check on the obtained surface, you can run its test
suite::
sage: TestSuite(S).run()
In the following example, we build two broken surfaces and
check that the test suite fails as expected::
sage: P = polygons(vertices=[(0,0), (1,0), (1,1), (0,1)])
sage: Stop = Surface_list(QQ)
sage: Stop.add_polygon(P)
0
sage: S = SimilaritySurface(Stop)
sage: TestSuite(S).run()
...
AssertionError: edge (0, 0) is not glued
------------------------------------------------------------
The following tests failed: _test_gluings
Failure in _test_underlying_surface
The following tests failed: _test_underlying_surface
sage: Stop.set_edge_pairing(0, 0, 0, 3)
sage: Stop.set_edge_pairing(0, 1, 0, 3)
sage: Stop.set_edge_pairing(0, 2, 0, 3)
sage: S = SimilaritySurface(Stop)
sage: TestSuite(S).run()
...
AssertionError: edge gluing is not a pairing:
(0, 0) -> (0, 3) -> (0, 2)
------------------------------------------------------------
The following tests failed: _test_gluings
Failure in _test_underlying_surface
The following tests failed: _test_underlying_surface
Finally, you can also implement a similarity surface by inheriting from
:class:`SimilaritySurface` and implementing the following methods (a
schematic sketch follows the list):
- ``base_ring(self)``: the base ring in which the coordinates live
- ``polygon(self, lab)``: the polygon associated to the label ``lab``
- ``base_label(self)``: which label to use as the base one
- ``opposite_edge(self, lab, edge)``: a pair (``other_label``,
``other_edge``) representing the edge being glued
- ``is_finite(self)``: whether the surface is built from finitely many polygons
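A schematic outline of such a subclass (an illustrative sketch, not a
tested doctest; the single self-glued square and the shortcut ``__init__``
below are assumptions made for brevity)::
    class SelfGluedSquare(SimilaritySurface):
        def __init__(self):
            # bypass the wrapper constructor and store the polygon directly
            from flatsurf import polygons
            self._square = polygons.square()
        def base_ring(self):
            return QQ
        def polygon(self, lab):
            return self._square
        def base_label(self):
            return 0
        def opposite_edge(self, lab, edge):
            # every edge of the unique square is glued to itself
            return (lab, edge)
        def is_finite(self):
            return True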
"""
def __init__(self, surface):
r"""
TESTS::
sage: from flatsurf.geometry.similarity_surface import SimilaritySurface
sage: SimilaritySurface(3)
Traceback (most recent call last):
...
TypeError: invalid argument surface=3 to build a half-translation surface
"""
if isinstance(surface, SimilaritySurface):
self._s = surface.underlying_surface()
elif isinstance(surface, Surface):
self._s = surface
else:
raise TypeError("invalid argument surface={} to build a half-translation surface".format(surface))
def underlying_surface(self):
r"""
Return the surface underlying this SimilaritySurface.
"""
return self._s
def _test_underlying_surface(self, **options):
is_sub_testsuite = 'tester' in options
tester = self._tester(**options)
tester.info("")
# TODO: this nested subsuite is very fragile since we have no
# way of forwarding the doctests to be skipped... Since,
# for now, the only use of this is for pickling of
# infinite surfaces, we provide that manually
if not self._s.is_finite():
skip = ['_test_pickling']
else:
skip = []
TestSuite(self._s).run(verbose = tester._verbose,
prefix = tester._prefix + " ",
raise_on_failure=is_sub_testsuite,
skip=skip)
tester.info(tester._prefix + " ", newline=False)
def base_ring(self):
r"""
The field on which the coordinates of ``self`` live.
This method must be overridden in subclasses!
"""
return self._s.base_ring()
def polygon(self, lab):
r"""
Return the polygon with label ``lab``.
"""
return self._s.polygon(lab)
def base_label(self):
r"""
Return the label of a fixed base polygon; repeated calls always return the same label.
"""
return self._s.base_label()
def opposite_edge(self, l, e=None):
r"""
Given the label ``l`` of a polygon and an edge ``e`` in that polygon
returns the pair (``ll``, ``ee``) to which this edge is glued.
If ``e`` is not provided, then the first argument is expected to be
the pair (``l``, ``e``), and the pair (``ll``, ``ee``) is returned as before.
"""
if e is None:
return self._s.opposite_edge(l[0],l[1])
return self._s.opposite_edge(l,e)
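# Illustrative sketch (not a doctest): both call forms described above are
# equivalent, assuming ``s`` is a SimilaritySurface whose polygon 0 has an
# edge 1:
#
#     s.opposite_edge(0, 1)     # label and edge passed separately
#     s.opposite_edge((0, 1))   # a single (label, edge) pair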
def is_finite(self):
r"""
Return whether or not the surface is finite.
"""
return self._s.is_finite()
def is_mutable(self):
r"""
Return whether or not the surface is mutable.
"""
return self._s.is_mutable()
def set_immutable(self):
r"""
Mark the surface as immutable.
"""
self._s.set_immutable()
def is_triangulated(self, limit=None):
return self._s.is_triangulated(limit=limit)
#
# generic methods
#
#def compute_surface_type_from_gluings(self,limit=None):
# r"""
# Compute the surface type by looking at the edge gluings.
# If limit is defined, we try to guess the type by looking at limit many edges.
# """
# if limit is None:
# if not self.is_finite():
# raise ValueError("Need a limit when working with an infinite surface.")
# it = self.edge_iterator()
# label,edge = it.next()
# # Use honest matrices!
# m = SimilaritySurface_generic.edge_matrix(self,label,edge)
# surface_type = surface_type_from_matrix(m)
# for label,edge in it:
# # Use honest matrices!
# m = SimilaritySurface_generic.edge_matrix(self,label,edge)
# surface_type = combine_surface_types(surface_type, surface_type_from_matrix(m))
# return surface_type
# else:
# count=0
# it = self.edge_iterator()
# label,edge = it.next()
# # Use honest matrices!
# m = SimilaritySurface_generic.edge_matrix(self,label,edge)
# surface_type = surface_type_from_matrix(m)
# for label,edge in it:
# # Use honest matrices!
# m = SimilaritySurface_generic.edge_matrix(self,label,edge)
# surface_type = combine_surface_types(surface_type, surface_type_from_matrix(m))
# count=count+1
# if count >= limit:
# return surface_type
# return surface_type
def walker(self):
return self._s.walker()
def label_iterator(self, polygons=False):
r"""
Iterator over all polygon labels.
If the keyword polygons is True then we return pairs (label, polygon)
instead of just labels.
"""
if polygons:
return self._s.label_polygon_iterator()
else:
return self._s.label_iterator()
def edge_iterator(self, gluings=False):
r"""
Iterate over the edges of polygons. These are pairs (l, e) where l is a polygon label and 0 <= e < N, with N the number of edges of the polygon with label l.
If the keyword gluings is set to true, then we iterate over ordered
pairs of edges ((l,e),(ll,ee)) where edge (l,e) is glued to (ll,ee).
EXAMPLES::
sage: from flatsurf import ConvexPolygons
sage: P = ConvexPolygons(QQ)
sage: tri0=P([(1,0),(0,1),(-1,-1)])
sage: tri1=P([(-1,0),(0,-1),(1,1)])
sage: gluings=[((0,0),(1,0)),((0,1),(1,1)),((0,2),(1,2))]
sage: from flatsurf.geometry.surface import surface_list_from_polygons_and_gluings
sage: from flatsurf.geometry.translation_surface import TranslationSurface
sage: s=TranslationSurface(surface_list_from_polygons_and_gluings([tri0,tri1], gluings))
sage: for edge in s.edge_iterator():
....: print(edge)
(0, 0)
(0, 1)
(0, 2)
(1, 0)
(1, 1)
(1, 2)
"""
if gluings:
return self._s.edge_gluing_iterator()
else:
return self._s.edge_iterator()
def num_polygons(self):
r"""
Return the number of polygons.
"""
return self._s.num_polygons()
def num_edges(self):
r"""
Return the total number of edges of all polygons used.
"""
return self._s.num_edges()
def num_singularities(self):
r"""
EXAMPLES::
sage: from flatsurf import *
sage: translation_surfaces.regular_octagon().num_singularities()
1
sage: S = SymmetricGroup(4)
sage: r = S('(1,2)(3,4)')
sage: u = S('(2,3)')
sage: translation_surfaces.origami(r,u).num_singularities()
2
sage: S = SymmetricGroup(8)
sage: r = S('(1,2,3,4,5,6,7,8)')
sage: u = S('(1,8,5,4)(2,3)(6,7)')
sage: translation_surfaces.origami(r,u).num_singularities()
4
"""
if not self.is_finite():
raise ValueError("the method only work for finite surfaces")
# NOTE:
# the very same code is implemented in the method angles (translation
# surfaces). we should factor out the code
edges = set((p,e) for p in self.label_iterator() for e in range(self.polygon(p).num_edges()))
n = ZZ(0)
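# Each pass of the loop below walks once around a single vertex of the
# surface, repeatedly crossing to the neighboring polygon through the edge
# preceding the current corner, and removes every corner in that vertex
# equivalence class from ``edges``.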
while edges:
p,e = edges.pop()
n += 1
ee = (e-1) % self.polygon(p).num_edges()
p,e = self.opposite_edge(p,ee)
while (p,e) in edges:
edges.remove((p,e))
ee = (e-1) % self.polygon(p).num_edges()
p,e = self.opposite_edge(p,ee)
return n
def _repr_(self):
if self.num_polygons() == Infinity:
num = 'infinitely many'
else:
num = str(self.num_polygons())
if self.num_polygons() == 1:
end = ""
else:
end = "s"
return "{} built from {} polygon{}".format(self.__class__.__name__, num, end)
def edge_matrix(self, p, e=None):
r"""
Return the edge to which this edge is identified and the matrix to be
applied.
"""
if e is None:
p,e = p
u = self.polygon(p).edge(e)
pp,ee = self.opposite_edge(p,e)
v = self.polygon(pp).edge(ee)
# be careful, because of the orientation, it is -v and not v
return similarity_from_vectors(u,-v)
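# Illustrative note (not a doctest): on a translation surface opposite edges
# are negatives of each other, so the matrix returned here is the identity;
# for example
#
#     translation_surfaces.square_torus().edge_matrix(0, 0)   # identity matrix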
def edge_transformation(self, p, e):
r"""
Return the similarity bringing the provided edge to the opposite edge.
EXAMPLES::
sage: from flatsurf.geometry.similarity_surface_generators import SimilaritySurfaceGenerators
sage: s = SimilaritySurfaceGenerators.example()
sage: print(s.polygon(0))
Polygon: (0, 0), (2, -2), (2, 0)
sage: print(s.polygon(1))
Polygon: (0, 0), (2, 0), (1, 3)
sage: print(s.opposite_edge(0,0))
(1, 1)
sage: g = s.edge_transformation(0,0)
sage: g((0,0))
(1, 3)
sage: g((2,-2))
(2, 0)
"""
G=SimilarityGroup(self.base_ring())
q=self.polygon(p)
a=q.vertex(e)
b=q.vertex(e+1)
# This is the similarity carrying the origin to a and (1,0) to b:
g=G(b[0]-a[0],b[1]-a[1],a[0],a[1])
pp,ee = self.opposite_edge(p,e)
qq=self.polygon(pp)
# Be careful here: opposite vertices are identified
aa=qq.vertex(ee+1)
bb=qq.vertex(ee)
# This is the similarity carrying the origin to aa and (1,0) to bb:
gg=G(bb[0]-aa[0],bb[1]-aa[1],aa[0],aa[1])
# This is the similarity carrying (a,b) to (aa,bb):
return gg*(~g)
def set_vertex_zero(self, label, v, in_place=False):
r"""
Applies a combinatorial rotation to the polygon with the provided label.
This makes what is currently vertex v of this polygon vertex 0. In other words,
what is currently vertex (or edge) e will now become vertex (e-v)%n where
n is the number of sides of the polygon.
EXAMPLES:
Example with polygon glued to another polygon::
sage: from flatsurf import *
sage: s = translation_surfaces.veech_double_n_gon(4)
sage: s.polygon(0)
Polygon: (0, 0), (1, 0), (1, 1), (0, 1)
sage: [s.opposite_edge(0,i) for i in range(4)]
[(1, 0), (1, 1), (1, 2), (1, 3)]
sage: ss = s.set_vertex_zero(0,1)
sage: ss.polygon(0)
Polygon: (0, 0), (0, 1), (-1, 1), (-1, 0)
sage: [ss.opposite_edge(0,i) for i in range(4)]
[(1, 1), (1, 2), (1, 3), (1, 0)]
sage: TestSuite(ss).run()
Example with polygon glued to self::
sage: s = translation_surfaces.veech_2n_gon(2)
sage: s.polygon(0)
Polygon: (0, 0), (1, 0), (1, 1), (0, 1)
sage: [s.opposite_edge(0,i) for i in range(4)]
[(0, 2), (0, 3), (0, 0), (0, 1)]
sage: ss = s.set_vertex_zero(0,3)
sage: ss.polygon(0)
Polygon: (0, 0), (0, -1), (1, -1), (1, 0)
sage: [ss.opposite_edge(0,i) for i in range(4)]
[(0, 2), (0, 3), (0, 0), (0, 1)]
sage: TestSuite(ss).run()
"""
if in_place:
us = self.underlying_surface()
if not us.is_mutable():
raise ValueError("set_vertex_zero can only be done in_place for a mutable surface.")
p = us.polygon(label)
n=p.num_edges()
assert 0<=v and v<n
glue=[]
P = ConvexPolygons(us.base_ring())
pp = P(edges=[p.edge((i+v)%n) for i in range(n)])
for i in range(n):
e=(v+i)%n
ll,ee = us.opposite_edge(label,e)
if ll==label:
ee = (ee+n-v)%n
glue.append((ll,ee))
us.change_polygon(label,pp,gluing_list=glue)
return self
else:
return self.copy(mutable=True).set_vertex_zero(label,v,in_place=True)
def _label_comparator(self):
r"""
Return a LabelComparator, which provides a fixed total ordering on the polygon labels.
"""
try:
return self._lc
except AttributeError:
self._lc = LabelComparator()
return self._lc
def relabel(self, relabeling_map, in_place=False):
r"""
Attempt to relabel the polygons according to a relabeling_map, which takes as input
a current label and outputs a new label for the same polygon. The method returns a pair
(surface,success) where surface is the relabeled surface, and success is a boolean value
indicating the success of the operation. The operation will fail if the implementation of the
underlying surface does not support labels used in the image of the relabeling map. In this case,
other (arbitrary) labels will be used to replace the labels of the surface, and the resulting
surface should still be valid.
Currently, the relabeling_map must be a dictionary.
If in_place is True then the relabeling is done to the current surface, otherwise a
mutable copy is made before relabeling.
ToDo:
- Allow relabeling_map to be a function rather than just a dictionary.
This will allow it to work for infinite surfaces.
EXAMPLES::
sage: from flatsurf import *
sage: s=translation_surfaces.veech_double_n_gon(5)
sage: ss,valid=s.relabel({0:1,1:2})
sage: valid
True
sage: ss.base_label()
1
sage: ss.opposite_edge(1,0)
(2, 0)
sage: ss.num_polygons()
2
sage: TestSuite(ss).run()
"""
if in_place:
us = self.underlying_surface()
if not us.is_mutable():
raise ValueError("Your surface is not mutable, so can not be relabeled in place.")
if not isinstance(relabeling_map,dict):
raise NotImplementedError("Currently relabeling is only implemented via a dictionary.")
domain=set()
codomain=set()
data={}
for l1,l2 in iteritems(relabeling_map):
p=us.polygon(l1)
glue = []
for e in range(p.num_edges()):
ll,ee = us.opposite_edge(l1,e)
try:
lll=relabeling_map[ll]
except KeyError:
lll=ll
glue.append((lll,ee))
data[l2]=(p,glue)
domain.add(l1)
codomain.add(l2)
if len(domain)!=len(codomain):
raise ValueError("The relabeling_map must be injective. Received "+str(relabeling_map))
changed_labels = domain.intersection(codomain)
added_labels=codomain.difference(domain)
removed_labels=domain.difference(codomain)
# Pass to add_polygons
relabel_errors={}
for l2 in added_labels:
p,glue=data[l2]
l3 = us.add_polygon(p, label=l2)
if not l2==l3:
# This means the label l2 could not be added for some reason.
# Perhaps the implementation does not support this type of label.
# Or perhaps there is already a polygon with this label.
relabel_errors[l2]=l3
# Pass to change polygons
for l2 in changed_labels:
p,glue=data[l2]
# This should always work since the domain of the relabeling map should be labels for polygons.
us.change_polygon(l2,p)
# Deal with the base_label
base_label = us.base_label()
if base_label in relabeling_map:
base_label = relabeling_map[base_label]
if base_label in relabel_errors:
base_label = relabel_errors[base_label]
us.change_base_label(base_label)
# Pass to remove polygons:
for l1 in removed_labels:
us.remove_polygon(l1)
# Pass to update the edge gluings
if len(relabel_errors)==0:
# No problems. Update the gluings.
for l2 in codomain:
p,glue=data[l2]
us.change_polygon_gluings(l2, glue)
else:
# Use the gluings provided by relabel_errors when necessary
for l2 in codomain:
p,glue=data[l2]
for e in range(p.num_edges()):
ll,ee=glue[e]
try:
# First try the error dictionary
us.change_edge_gluing(l2, e, relabel_errors[ll],ee)
except KeyError:
us.change_edge_gluing(l2, e, ll,ee)
return self, len(relabel_errors)==0
else:
return self.copy(mutable=True).relabel(relabeling_map, in_place=True)
def copy(self, relabel=False, mutable=False, lazy=None, new_field=None, optimal_number_field=False):
r"""
Returns a copy of this surface. The method takes several flags to modify how the copy is taken.
If relabel is True, then instead of returning an exact copy, it returns a copy indexed by the
non-negative integers. This uses the Surface_list implementation. If relabel is False (default),
then we return an exact copy. The returned surface uses the Surface_dict implementation.
The mutable flag determines whether the resulting surface is mutable. By default, the
resulting surface will not be mutable.
If lazy is True, then the surface is copied by reference. This is the only type of copy
possible for infinite surfaces. The parameter defaults to False for finite surfaces, and
defaults to True for infinite surfaces.
The new_field parameter can be used to place the vertices in a larger field than the base field
of the original surface.
The optimal_number_field option can be used to find a minimal NumberField containing the
coordinates of the (necessarily finite) surface.
EXAMPLES::
sage: from flatsurf import *
sage: ss=translation_surfaces.ward(3)
sage: print(ss.is_mutable())
False
sage: s=ss.copy(mutable=True)
sage: print(s.is_mutable())
True
sage: TestSuite(s).run()
sage: print(s==ss)
True
sage: # Changing the base field
sage: from flatsurf import *
sage: s=translation_surfaces.veech_double_n_gon(5)
sage: ss=s.copy(mutable=False,new_field=AA)
sage: TestSuite(ss).run()
sage: ss.base_ring()
Algebraic Real Field
sage: # Optimization of number field
sage: from flatsurf import *
sage: s = translation_surfaces.arnoux_yoccoz(3)
sage: ss = s.copy(new_field=AA).copy(optimal_number_field=True)
sage: TestSuite(ss).run()
sage: ss.base_ring().discriminant()
-44
"""
s = None # This will be the surface we copy. (Likely we will set s=self below.)
if new_field is not None and optimal_number_field:
raise ValueError("You can not set a new_field and also set optimal_number_field=True.")
if optimal_number_field == True:
assert self.is_finite(), "Can only optimize_number_field for a finite surface."
assert not lazy, "Lazy copying is unavailable when optimize_number_field=True."
coordinates_AA = []
for l,p in self.label_iterator(polygons = True):
for e in p.edges():
coordinates_AA.append(AA(e[0]))
coordinates_AA.append(AA(e[1]))
from sage.rings.qqbar import number_field_elements_from_algebraics
field,coordinates_NF,hom = number_field_elements_from_algebraics(coordinates_AA, minimal = True)
if field is QQ:
new_field = QQ
# We pretend new_field = QQ was passed as a parameter.
# It will now get picked up by the "if new_field is not None:" line below.
else:
# Unfortunately, field doesn't come with a real embedding (which is given by hom!)
# So, we make a copy of the field, and add the embedding.
field2 = NumberField(field.polynomial(), name = "a", embedding = hom(field.gen()))
# The following converts from field to field2:
hom2 = field.hom(im_gens = [field2.gen()])
ss = Surface_dict(base_ring = field2)
index = 0
P = ConvexPolygons(field2)
for l,p in self.label_iterator(polygons = True):
new_edges = []
for i in range(p.num_edges()):
new_edges.append( (hom2(coordinates_NF[index]), hom2(coordinates_NF[index+1]) ) )
index += 2
pp = P(edges = new_edges)
ss.add_polygon(pp, label = l)
ss.change_base_label(self.base_label())
for (l1,e1),(l2,e2) in self.edge_iterator(gluings = True):
ss.change_edge_gluing(l1, e1, l2, e2)
s = self.__class__(ss)
if not relabel:
if not mutable:
s.set_immutable()
return s
# Otherwise we are supposed to relabel. We will make a relabeled copy of s below.
if new_field is not None:
from flatsurf.geometry.surface import BaseRingChangedSurface
s = BaseRingChangedSurface(self,new_field)
if s is None:
s = self
if s.is_finite():
if relabel:
return self.__class__(Surface_list(surface=s, copy=not lazy, mutable=mutable))
else:
return self.__class__(Surface_dict(surface=s, copy=not lazy, mutable=mutable))
else:
if lazy==False:
raise ValueError("Only lazy copying available for infinite surfaces.")
if self.underlying_surface().is_mutable():
raise ValueError("An infinite surface can only be copied if it is immutable.")
if relabel:
return self.__class__(Surface_list(surface=s, copy=False, mutable=mutable))
else:
return self.__class__(Surface_dict(surface=s, copy=False, mutable=mutable))
def triangle_flip(self, l1, e1, in_place=False, test=False, direction=None):
r"""
Flips the diagonal of the quadrilateral formed by two triangles
glued together along the provided edge (l1,e1). This can be broken
into two steps: join along the edge to form a convex quadrilateral,
then cut along the other diagonal. Raises a ValueError if this
quadrilateral would be non-convex. In this case no changes to the
surface are made.
The direction parameter defaults to (0,1). This is used to decide how
the triangles being glued in are labeled. Let p1 be the triangle
associated to label l1, and p2 be the triangle associated to l2
but moved by a similarity to share the edge (l1,e1). Each triangle
has exactly one separatrix leaving a vertex which travels in the
provided direction or its opposite. (For edges we only count separatrices
traveling counter-clockwise around the triangle.) This holds for p1
and p2 and the separatrices must point in opposite directions.
The above description gives two new triangles t1 and t2 which must be
glued in (obtained by flipping the diagonal of the quadrilateral).
Up to swapping t1 and t2 we can assume the separatrix in t1 in the
provided direction (or its opposite) points in the same direction as
that of p1. Further up to cyclic permutation of vertex labels we can
assume that the separatrices in p1 and t1 start at the vertex with the
same index (an element of {0,1,2}). The same can be done for p2 and t2.
We apply the label l1 to t1 and the label l2 to t2. This precisely
determines how t1 and t2 should be used to replace p1 and p2.
INPUT:
- ``l1`` - label of polygon
- ``e1`` - (integer) edge of the polygon
- ``in_place`` (boolean) - If True do the flip to the current surface
which must be mutable. In this case the updated surface will be
returned. Otherwise a mutable copy is made and then an edge is
flipped, which is then returned.
- ``test`` (boolean) - If True we don't actually flip, and we return
True or False depending on whether or not the flip would be
successful.
- ``direction`` (2-dimensional vector) - Defaults to (0,1). The choice
of this vector determines how the newly added triangles are labeled.
EXAMPLES::
sage: from flatsurf import *
sage: s = similarity_surfaces.right_angle_triangle(ZZ(1),ZZ(1))
sage: print(s.polygon(0))
Polygon: (0, 0), (1, 0), (0, 1)
sage: s.triangle_flip(0, 0, test=True)
False
sage: s.triangle_flip(0, 1, test=True)
True
sage: s.triangle_flip(0, 2, test=True)
False
sage: s = similarity_surfaces.right_angle_triangle(ZZ(1),ZZ(1))
sage: from flatsurf.geometry.surface import Surface_list
sage: s = s.__class__(Surface_list(surface=s, mutable=True))
sage: try:
....: s.triangle_flip(0,0,in_place=True)
....: except ValueError as e:
....: print(e)
Gluing triangles along this edge yields a non-convex quadrilateral.
sage: s.triangle_flip(0,1,in_place=True)
ConeSurface built from 2 polygons
sage: s.polygon(0)
Polygon: (0, 0), (1, 1), (0, 1)
sage: s.polygon(1)
Polygon: (0, 0), (-1, -1), (0, -1)
sage: for p in s.edge_iterator(gluings=True):
....: print(p)
((0, 0), (1, 0))
((0, 1), (0, 2))
((0, 2), (0, 1))
((1, 0), (0, 0))
((1, 1), (1, 2))
((1, 2), (1, 1))
sage: try:
....: s.triangle_flip(0,2,in_place=True)
....: except ValueError as e:
....: print(e)
....:
Gluing triangles along this edge yields a non-convex quadrilateral.
sage: p = polygons((2,0),(-1,3),(-1,-3))
sage: s = similarity_surfaces.self_glued_polygon(p)
sage: from flatsurf.geometry.surface import Surface_list
sage: s = s.__class__(Surface_list(surface=s,mutable=True))
sage: s.triangle_flip(0,1,in_place=True)
HalfTranslationSurface built from 1 polygon
sage: for x in s.label_iterator(polygons=True):
....: print(x)
(0, Polygon: (0, 0), (-3, -3), (-1, -3))
sage: for x in s.edge_iterator(gluings=True):
....: print(x)
((0, 0), (0, 0))
((0, 1), (0, 1))
((0, 2), (0, 2))
sage: TestSuite(s).run()
"""
if test:
# Just test if the flip would be successful
p1=self.polygon(l1)
if not p1.num_edges()==3:
return False
l2,e2 = self.opposite_edge(l1,e1)
p2 = self.polygon(l2)
if not p2.num_edges()==3:
return False
sim = self.edge_transformation(l2,e2)
hol = sim( p2.vertex( (e2+2)%3 ) - p1.vertex((e1+2)%3) )
from flatsurf.geometry.polygon import wedge_product
return wedge_product(p1.edge((e1+2)%3), hol) > 0 and \
wedge_product(p1.edge((e1+1)%3), hol) > 0
if in_place:
s=self
assert s.is_mutable(), "Surface must be mutable for in place triangle_flip."
else:
s=self.copy(mutable=True)
p1=s.polygon(l1)
if not p1.num_edges()==3:
raise ValueError("The polygon with the provided label is not a triangle.")
l2,e2 = s.opposite_edge(l1,e1)
sim = s.edge_transformation(l2,e2)
m = sim.derivative()
p2=s.polygon(l2)
if not p2.num_edges()==3:
raise ValueError("The polygon opposite the provided edge is not a triangle.")
P=p1.parent()
p2=P(vertices=[sim(v) for v in p2.vertices()])
if direction is None:
direction=s.vector_space()((0,1))
# Get vertices corresponding to separatices in the provided direction.
v1=p1.find_separatrix(direction=direction)[0]
v2=p2.find_separatrix(direction=direction)[0]
# Our quadrilateral has vertices labeled:
# * 0=p1.vertex(e1+1)=p2.vertex(e2)
# * 1=p1.vertex(e1+2)
# * 2=p1.vertex(e1)=p2.vertex(e2+1)
# * 3=p2.vertex(e2+2)
# Record the corresponding vertices of this quadrilateral.
q1 = (3+v1-e1-1)%3
q2 = (2+(3+v2-e2-1)%3)%4
new_diagonal=p2.vertex((e2+2)%3)-p1.vertex((e1+2)%3)
# This list will store the new triangles which are being glued in.
# (Unfortunately, they may not be cyclically labeled in the correct way.)
new_triangle=[]
try:
new_triangle.append(P(edges=[p1.edge((e1+2)%3),p2.edge((e2+1)%3),-new_diagonal]))
new_triangle.append(P(edges=[p2.edge((e2+2)%3),p1.edge((e1+1)%3),new_diagonal]))
# The above triangles would be glued along edge 2 to form the diagonal of the quadrilateral being removed.
except ValueError:
raise ValueError("Gluing triangles along this edge yields a non-convex quadrilateral.")
# Find the separatrices of the two new triangles, and in particular which way they point.
new_sep=[]
new_sep.append(new_triangle[0].find_separatrix(direction=direction)[0])
new_sep.append(new_triangle[1].find_separatrix(direction=direction)[0])
# The quadrilateral vertices corresponding to these separatrices are
# new_sep[0]+1 and (new_sep[1]+3)%4 respectively.
# i=0 if the new_triangle[0] should be labeled l1 and new_triangle[1] should be labeled l2.
# i=1 indicates the opposite labeling.
if new_sep[0]+1==q1:
# For debugging:
assert (new_sep[1]+3)%4==q2, \
"Bug: new_sep[1]="+str(new_sep[1])+" and q2="+str(q2)
i=0
else:
# For debugging:
assert (new_sep[1]+3)%4==q1
assert new_sep[0]+1==q2
i=1
# These quantities represent the cyclic relabeling of triangles needed.
cycle1 = (new_sep[i]-v1+3)%3
cycle2 = (new_sep[1-i]-v2+3)%3
# This will be the new triangle with label l1:
tri1=P(edges=[new_triangle[i].edge(cycle1), \
new_triangle[i].edge((cycle1+1)%3), \
new_triangle[i].edge((cycle1+2)%3)])
# This will be the new triangle with label l2:
tri2=P(edges=[new_triangle[1-i].edge(cycle2), \
new_triangle[1-i].edge((cycle2+1)%3), \
new_triangle[1-i].edge((cycle2+2)%3)])
# In the above, edge 2-cycle1 of tri1 would be glued to edge 2-cycle2 of tri2
diagonal_glue_e1=2-cycle1
diagonal_glue_e2=2-cycle2
# FOR CATCHING BUGS:
assert p1.find_separatrix(direction=direction)==tri1.find_separatrix(direction=direction)
assert p2.find_separatrix(direction=direction)==tri2.find_separatrix(direction=direction)
# Two opposite edges will not change their labels (label,edge) under our regluing operation.
# The other two opposite ones will change and in fact they change labels.
# The following finds them (there are two cases).
# At the end of the if statement, the following will be true:
# * new_glue_e1 and new_glue_e2 will be the edges of the new triangle with label l1 and l2 which need regluing.
# * old_e1 and old_e2 will be the corresponding edges of the old triangles.
# (Note that labels are swapped between the pair. The appending 1 or 2 refers to the label used for the triangle.)
if p1.edge(v1)==tri1.edge(v1):
# We don't have to worry about changing gluings on edge v1 of the triangles with label l1
# We do have to worry about the following edge:
new_glue_e1=3-diagonal_glue_e1-v1 # returns the edge which is neither diagonal_glue_e1 nor v1.
# This corresponded to the following old edge:
old_e1 = 3 - e1 - v1 # Again this finds the edge which is neither e1 nor v1
else:
temp = (v1+2)%3
# FOR CATCHING BUGS:
assert p1.edge(temp)==tri1.edge(temp)
# We don't have to worry about changing gluings on edge (v1+2)%3 of the triangles with label l1
# We do have to worry about the following edge:
new_glue_e1=3-diagonal_glue_e1-temp # returns the edge which is neither diagonal_glue_e1 nor temp.
# This corresponded to the following old edge:
old_e1 = 3 - e1 - temp # Again this finds the edge which is neither e1 nor temp
if p2.edge(v2)==tri2.edge(v2):
# We don't have to worry about changing gluings on edge v2 of the triangles with label l2
# We do have to worry about the following edge:
new_glue_e2=3-diagonal_glue_e2-v2 # returns the edge which is neither diagonal_glue_e2 nor v2.
# This corresponded to the following old edge:
old_e2 = 3 - e2 - v2 # Again this finds the edge which is neither e2 nor v2
else:
temp = (v2+2)%3
# FOR CATCHING BUGS:
assert p2.edge(temp)==tri2.edge(temp)
# We don't have to worry about changing gluings on edge (v2+2)%3 of the triangles with label l2
# We do have to worry about the following edge:
new_glue_e2=3-diagonal_glue_e2-temp # returns the edge which is neither diagonal_glue_e2 nor temp.
# This corresponded to the following old edge:
old_e2 = 3 - e2 - temp # Again this finds the edge which is neither e2 nor temp
# remember the old gluings.
old_opposite1 = s.opposite_edge(l1, old_e1)
old_opposite2 = s.opposite_edge(l2, old_e2)
# We make changes to the underlying surface
us=s.underlying_surface()
# Replace the triangles.
us.change_polygon(l1,tri1)
us.change_polygon(l2,tri2)
# Glue along the new diagonal of the quadrilateral
us.change_edge_gluing(l1,diagonal_glue_e1,
l2,diagonal_glue_e2)
# Now we deal with that pair of opposite edges of the quadrilateral that need regluing.
# There are some special cases:
if old_opposite1==(l2,old_e2):
# These opposite edges were glued to each other.
# Do the same in the new surface:
us.change_edge_gluing(l1,new_glue_e1,
l2,new_glue_e2)
else:
if old_opposite1==(l1,old_e1):
# That edge was "self-glued".
us.change_edge_gluing(l2,new_glue_e2,
l2,new_glue_e2)
else:
# The edge (l1,old_e1) was glued in a standard way.
# That edge now corresponds to (l2,new_glue_e2):
us.change_edge_gluing(l2,new_glue_e2,
old_opposite1[0],old_opposite1[1])
if old_opposite2==(l2,old_e2):
# That edge was "self-glued".
us.change_edge_gluing(l1,new_glue_e1,
l1,new_glue_e1)
else:
# The edge (l2,old_e2) was glued in a standard way.
# That edge now corresponds to (l1,new_glue_e1):
us.change_edge_gluing(l1,new_glue_e1,
old_opposite2[0],old_opposite2[1])
return s
def join_polygons(self, p1, e1, test=False, in_place=False):
r"""
Join polygons across the provided edge (p1,e1). By default,
it returns the surface obtained by joining the two polygons
together. It raises a ValueError if gluing the two polygons
together results in a non-convex polygon. This is done to the
current surface if in_place is True, and otherwise a mutable
copy is made and then modified.
If test is True then instead of changing the surface, it just
checks to see if the change would be successful and returns
True if successful or False if not.
EXAMPLES::
sage: from flatsurf import *
sage: ss=translation_surfaces.ward(3)
sage: s=ss.copy(mutable=True)
sage: s.join_polygons(0,0, in_place=True)
TranslationSurface built from 2 polygons
sage: print(s.polygon(0))
Polygon: (0, 0), (1, -a), (2, 0), (3, a), (2, 2*a), (0, 2*a), (-1, a)
sage: s.join_polygons(0,4, in_place=True)
TranslationSurface built from 1 polygon
sage: print(s.polygon(0))
Polygon: (0, 0), (1, -a), (2, 0), (3, a), (2, 2*a), (1, 3*a), (0, 2*a), (-1, a)
"""
poly1=self.polygon(p1)
p2,e2 = self.opposite_edge(p1,e1)
poly2=self.polygon(p2)
if p1==p2:
if test:
return False
else:
raise ValueError("Can't glue polygon to itself.")
t=self.edge_transformation(p2, e2)
dt=t.derivative()
vs = []
edge_map={} # Store the pairs for the old edges.
for i in range(e1):
edge_map[len(vs)]=(p1,i)
vs.append(poly1.edge(i))
ne=poly2.num_edges()
for i in range(1,ne):
ee=(e2+i)%ne
edge_map[len(vs)]=(p2,ee)
vs.append(dt * poly2.edge( ee ))
for i in range(e1+1, poly1.num_edges()):
edge_map[len(vs)]=(p1,i)
vs.append(poly1.edge(i))
try:
new_polygon = ConvexPolygons(self.base_ring())(vs)
except (ValueError, TypeError):
if test:
return False
else:
raise ValueError("Joining polygons along this edge results in a non-convex polygon.")
if test:
# Gluing would be successful
return True
# Now no longer testing. Do the gluing.
if in_place:
ss=self
else:
ss=self.copy(mutable=True)
s=ss.underlying_surface()
inv_edge_map={}
for key, value in iteritems(edge_map):
inv_edge_map[value]=(p1,key)
glue_list=[]
for i in range(len(vs)):
p3,e3 = edge_map[i]
p4,e4 = self.opposite_edge(p3,e3)
if p4 == p1 or p4 == p2:
glue_list.append(inv_edge_map[(p4,e4)])
else:
glue_list.append((p4,e4))
if s.base_label()==p2:
s.change_base_label(p1)
s.remove_polygon(p2)
s.change_polygon(p1, new_polygon, glue_list)
return ss
def subdivide_polygon(self, p, v1, v2, test=False, new_label=None):
r"""
Cut the polygon with label p along the diagonal joining vertex
v1 to vertex v2. This cuts p into two polygons, one will keep the same
label. The other will get a new label, which can be provided
via new_label. Otherwise a default new label will be provided.
If test=False, then the surface will be changed in place. If
test=True, then it only checks whether the subdivision would be
successful and leaves the surface unchanged.
The convention is that the resulting subdivided polygon which has an oriented
edge going from the original vertex v1 to vertex v2 will keep the label p.
The other polygon will get a new label.
"""
poly=self.polygon(p)
ne=poly.num_edges()
if v1<0 or v2<0 or v1>=ne or v2>=ne:
if test:
return False
else:
raise ValueError('Provided vertices out of bounds.')
if abs(v1-v2)<=1 or abs(v1-v2)>=ne-1:
if test:
return False
else:
raise ValueError('Provided diagonal is not actually a diagonal.')
if test:
# The diagonal is valid; report success without modifying the surface.
return True
if v2<v1:
v2=v2+ne
newedges1=[poly.vertex(v2)-poly.vertex(v1)]
for i in range(v2, v1+ne):
newedges1.append(poly.edge(i))
newpoly1 = ConvexPolygons(self.base_ring())(newedges1)
newedges2=[poly.vertex(v1)-poly.vertex(v2)]
for i in range(v1,v2):
newedges2.append(poly.edge(i))
newpoly2 = ConvexPolygons(self.base_ring())(newedges2)
# Store the old gluings
old_gluings = {(p,i): self.opposite_edge(p,i) for i in range(ne)}
# Update the polygon with label p, add a new polygon.
self.underlying_surface().change_polygon(p, newpoly1)
if new_label is None:
new_label = self.underlying_surface().add_polygon(newpoly2)
else:
new_label = self.underlying_surface().add_polygon(newpoly2, label=new_label)
# This gluing is the diagonal we used.
self.underlying_surface().change_edge_gluing(p, 0, new_label, 0)
# Setup conversion from old to new labels.
old_to_new_labels={}
for i in range(v1, v2):
old_to_new_labels[(p,i%ne)]=(new_label,i-v1+1)
for i in range(v2, ne+v1):
old_to_new_labels[(p,i%ne)]=(p,i-v2+1)
for e in range(1, newpoly1.num_edges()):
pair = old_gluings[(p,(v2+e-1)%ne)]
if pair in old_to_new_labels:
pair = old_to_new_labels[pair]
self.underlying_surface().change_edge_gluing(p, e, pair[0], pair[1])
for e in range(1, newpoly2.num_edges()):
pair = old_gluings[(p,(v1+e-1)%ne)]
if pair in old_to_new_labels:
pair = old_to_new_labels[pair]
self.underlying_surface().change_edge_gluing(new_label, e, pair[0], pair[1])
def singularity(self, l, v, limit=None):
r"""
Return the Singularity associated to the v-th vertex of the polygon with
label l.
If the surface is infinite, the limit needs to be set. In this case the construction
of the singularity is successful if the sequence of vertices hit by passing through
edges closes up in ``limit`` or fewer steps.
EXAMPLES::
sage: from flatsurf import *
sage: s = translation_surfaces.square_torus()
sage: pc = s.minimal_cover(cover_type="planar")
sage: pc.singularity(pc.base_label(),0)
Traceback (most recent call last):
...
ValueError: need a limit when working with an infinite surface
sage: pc.singularity(pc.base_label(),0,limit=4)
singularity with vertex equivalence class frozenset(...)
"""
return Singularity(self,l,v,limit)
def point(self, label, point, ring=None, limit=None):
r"""
Return a point in this surface.
INPUT:
- ``label`` - label of the polygon
- ``point`` - coordinates of the point inside the polygon
- ``ring`` (optional) - a ring for the coordinates
- ``limit`` (optional) - undocumented (only necessary if the point corresponds
to a singularity in an infinite surface)
EXAMPLES::
sage: from flatsurf import *
sage: s = translation_surfaces.square_torus()
sage: pc = s.minimal_cover(cover_type="planar")
sage: pc.surface_point(pc.base_label(),(0,0))
Traceback (most recent call last):
...
ValueError: need a limit when working with an infinite surface
sage: pc.surface_point(pc.base_label(),(1,0),limit=4)
Surface point with 4 coordinate representations
sage: z = pc.surface_point(pc.base_label(),(sqrt(2)-1,sqrt(3)-1),ring=AA)
sage: next(iter(z.coordinates(z.labels()[0]))).parent()
Vector space of dimension 2 over Algebraic Real Field
"""
return SurfacePoint(self, label, point, ring=ring, limit=limit)
# TODO: deprecate
surface_point = point
def ramified_cover(self, degree, data):
r"""
Build a ramified cover of this surface with given ``degree`` and ramification ``data``.
INPUT:
- ``degree`` (integer) -- the degree of the cover
- ``data`` -- dictionary that associates to a pair ``(polygon_label, edge_number)`` a permutation
of ``{1, 2, ..., degree}``
EXAMPLES:
The L-shape origami::
sage: import flatsurf
sage: T = flatsurf.translation_surfaces.square_torus()
sage: T.ramified_cover(3, {(0,0): '(1,2)', (0,1): '(1,3)'})
TranslationSurface built from 3 polygons
sage: O = T.ramified_cover(3, {(0,0): '(1,2)', (0,1): '(1,3)'})
sage: O.stratum()
H_2(2)
TESTS::
sage: import flatsurf
sage: T = flatsurf.translation_surfaces.square_torus()
sage: T.ramified_cover(3, {(0,0): '(1,2)', (0,2): '(1,3)'})
Traceback (most recent call last):
...
ValueError: inconsistent covering data
"""
if not self.is_finite():
raise ValueError("this method is only available for finite surfaces")
return type(self)(self._s.ramified_cover(degree, data))
def minimal_cover(self, cover_type = "translation"):
r"""
Return the minimal translation or half-translation cover of the surface.
Cover type may be either "translation", "half-translation" or "planar".
The minimal planar cover of a surface S is the smallest cover C so that
the developing map from the universal cover U to the plane induces a
well defined map from C to the plane. This is an infinite translation
surface that is naturally a branched cover of the plane.
EXAMPLES::
sage: from flatsurf.geometry.surface import Surface_list
sage: s = Surface_list(QQ)
sage: from flatsurf.geometry.polygon import polygons
sage: square = polygons.square(field=QQ)
sage: s.add_polygon(square)
0
sage: s.change_edge_gluing(0,0,0,1)
sage: s.change_edge_gluing(0,2,0,3)
sage: from flatsurf.geometry.cone_surface import ConeSurface
sage: cs = ConeSurface(s)
sage: ts = cs.minimal_cover(cover_type="translation")
sage: ts
TranslationSurface built from 4 polygons
sage: hts = cs.minimal_cover(cover_type="half-translation")
sage: hts
HalfTranslationSurface built from 2 polygons
sage: TestSuite(hts).run()
sage: ps = cs.minimal_cover(cover_type="planar")
sage: ps
TranslationSurface built from infinitely many polygons
sage: TestSuite(ps).run(skip="_test_pickling")
sage: from flatsurf import *
sage: S = similarity_surfaces.example()
sage: T = S.minimal_cover(cover_type="translation")
sage: T
TranslationSurface built from infinitely many polygons
sage: T.polygon(T.base_label())
Polygon: (0, 0), (2, -2), (2, 0)
"""
if cover_type == "translation":
from flatsurf.geometry.translation_surface import TranslationSurface
from flatsurf.geometry.minimal_cover import MinimalTranslationCover
return TranslationSurface(MinimalTranslationCover(self))
if cover_type == "half-translation":
from flatsurf.geometry.half_translation_surface import HalfTranslationSurface
from flatsurf.geometry.minimal_cover import MinimalHalfTranslationCover
return HalfTranslationSurface(MinimalHalfTranslationCover(self))
if cover_type == "planar":
from flatsurf.geometry.translation_surface import TranslationSurface
from flatsurf.geometry.minimal_cover import MinimalPlanarCover
return TranslationSurface(MinimalPlanarCover(self))
raise ValueError("Provided cover_type is not supported.")
def minimal_translation_cover(self):
r"""
Return the minimal translation cover.
"Be careful that if the surface is not built from one polygon, this is
not the smallest translation cover of the surface." - Vincent
"I disagree with the prior statement. Can you provide an example?" -Pat
"""
from sage.misc.superseded import deprecation
deprecation(13109, "minimal_translation_cover is deprecated. Use minimal_cover(cover_type = \"translation\") instead.")
from flatsurf.geometry.translation_surface import MinimalTranslationCover, TranslationSurface
return TranslationSurface(MinimalTranslationCover(self))
def vector_space(self):
r"""
Return the vector space in which self naturally embeds.
"""
from sage.modules.free_module import VectorSpace
return VectorSpace(self.base_ring(), 2)
def fundamental_group(self, base_label=None):
r"""
Return the fundamental group of this surface.
"""
if not self.is_finite():
raise ValueError("the method only work for finite surfaces")
if base_label is None:
base_label = self.base_label()
from .fundamental_group import FundamentalGroup
return FundamentalGroup(self, base_label)
def tangent_bundle(self, ring=None):
r"""
Return the tangent bundle of this surface.
INPUT:
- ``ring`` -- an optional field (defaults to the coordinate field of the
surface)
"""
if ring is None:
ring = self.base_ring()
try:
return self._tangent_bundle_cache[ring]
except AttributeError:
self._tangent_bundle_cache = {}
except KeyError:
pass
from .tangent_bundle import SimilaritySurfaceTangentBundle
self._tangent_bundle_cache[ring] = SimilaritySurfaceTangentBundle(self, ring)
return self._tangent_bundle_cache[ring]
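# Illustrative sketch (not a doctest): the bundle is cached per ring, so two
# calls with the same ring return the same object, assuming ``s`` is any
# SimilaritySurface:
#
#     tb = s.tangent_bundle()
#     tb is s.tangent_bundle()   # True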
def tangent_vector(self, lab, p, v, ring=None):
r"""
Return a tangent vector.
INPUT:
- ``lab`` -- label of a polygon
- ``p`` -- coordinates of a point in the polygon
- ``v`` -- coordinates of a vector in R^2
EXAMPLES::
sage: from flatsurf.geometry.chamanara import chamanara_surface
sage: S = chamanara_surface(1/2)
sage: S.tangent_vector(S.base_label(), (1/2,1/2), (1,1))
SimilaritySurfaceTangentVector in polygon (1, -1, 0) based at (1/2, -3/2) with vector (1, 1)
sage: K.<sqrt2> = QuadraticField(2)
sage: S.tangent_vector(S.base_label(), (1/2,1/2), (1,sqrt2), ring=K)
SimilaritySurfaceTangentVector in polygon (1, -1, 0) based at (1/2, -3/2) with vector (1, sqrt2)
"""
p = vector(p)
v = vector(v)
if p.parent().dimension() != 2 or v.parent().dimension() != 2:
raise ValueError("p (={!r}) and v (={!v}) should have two coordinates")
if ring is None:
ring = self.base_ring()
try:
return self.tangent_bundle(ring)(lab, p, v)
except TypeError:
raise TypeError("Use the ring=??? option to construct tangent vectors in other field different from the base_ring().")
# Old version seemed to be to accepting of inputs (eg, from Symbolic Ring)
#R = p.base_ring()
#if R != v.base_ring():
# from sage.structure.element import get_coercion_model
# cm = get_coercion_model()
# R = cm.common_parent(R, v.base_ring())
# p = p.change_ring(R)
# v = v.change_ring(R)
#R2 = self.base_ring()
#if R != R2:
# if R2.has_coerce_map_from(R):
# p = p.change_ring(R2)
# v = v.change_ring(R2)
# R = R2
# elif not R.has_coerce_map_from(R2):
# raise ValueError("not able to find a common ring for arguments")
#return self.tangent_bundle(R)(lab, p, v)
else:
return self.tangent_bundle(ring)(lab, p, v)
def reposition_polygons(self, in_place=False, relabel=False):
r"""
We choose a maximal tree in the dual graph of the decomposition into
polygons, and ensure that the gluings between two polygons joined by
an edge in this tree is by translation.
This guarantees that the group generated by the edge identifications is
minimal among representations of the surface. In particular, if you have a
translation surface which is not currently presented as a translation
surface (because polygons are presented with rotations), then after this
change it will be representable as a translation surface.
"""
if not self.is_finite():
raise NotImplementedError("Only implemented for finite surfaces.")
if in_place:
if not self.is_mutable():
raise ValueError("reposition_polygons in_place is only available "+\
"for mutable surfaces.")
s=self
else:
s=self.copy(relabel=relabel, mutable=True)
w=s.walker()
from flatsurf.geometry.similarity import SimilarityGroup
S=SimilarityGroup(self.base_ring())
identity=S.one()
it = iter(w)
label = next(it)
changes = {label:identity}
for label in it:
edge = w.edge_back(label)
label2,edge2 = s.opposite_edge(label, edge)
changes[label] = changes[label2] * s.edge_transformation(label,edge)
it = iter(w)
# Skip the base label:
label = next(it)
for label in it:
p = s.polygon(label)
p = changes[label].derivative()*p
s.underlying_surface().change_polygon(label,p)
return s
def triangulation_mapping(self):
r"""
Return a SurfaceMapping triangulating the surface or None if the surface is already triangulated.
"""
from flatsurf.geometry.mappings import triangulation_mapping
return triangulation_mapping(self)
def triangulate(self, in_place=False, label = None, relabel=False):
r"""
Return a triangulated version of this surface. (This may be mutable
or not depending on the input.)
If label=None (as default) all polygons are triangulated. Otherwise,
label should be a polygon label. In this case, just this polygon
is split into triangles.
This is done in place if in_place is True (defaults to False).
If we are not doing triangulation in_place, then we must make a copy.
This can be a relabeled copy (indexed by the non-negative ints)
or a label preserving copy. The copy is relabeled if relabel=True
(default False).
EXAMPLES::
sage: from flatsurf import *
sage: s=translation_surfaces.mcmullen_L(1,1,1,1)
sage: ss=s.triangulate()
sage: gs=ss.graphical_surface()
sage: gs.make_all_visible()
sage: print(gs)
Graphical version of Similarity Surface TranslationSurface built from 6 polygons
A non-strictly convex example that caused trouble::
sage: from flatsurf import *
sage: s=similarity_surfaces.self_glued_polygon(polygons(edges=[(1,1),(-3,-1),(1,0),(1,0)]))
sage: s=s.triangulate()
sage: s.polygon(0).num_edges()
3
"""
if label is None:
# We triangulate the whole surface
if self.is_finite():
# Store the current labels.
labels = [label for label in self.label_iterator()]
if in_place:
s=self
else:
s=self.copy(mutable=True)
# Subdivide each polygon in turn.
for l in labels:
s = s.triangulate(in_place=True, label=l)
return s
else:
if in_place:
raise ValueError("You can't triangulate an infinite surface in place.")
from flatsurf.geometry.delaunay import LazyTriangulatedSurface
return self.__class__(LazyTriangulatedSurface(self))
else:
poly = self.polygon(label)
n=poly.num_edges()
if n>3:
if in_place:
s=self
else:
s=self.copy(mutable=True)
else:
# This polygon is already a triangle.
return self
from flatsurf.geometry.polygon import wedge_product
for i in range(n-3):
poly = s.polygon(label)
n=poly.num_edges()
for i in range(n):
e1=poly.edge(i)
e2=poly.edge((i+1)%n)
if wedge_product(e1,e2) != 0:
# This is in case the polygon is a triangle with subdivided edge.
e3=poly.edge((i+2)%n)
if wedge_product(e1+e2,e3) != 0:
s.subdivide_polygon(label,i,(i+2)%n)
break
return s
raise RuntimeError("Failed to return anything!")
def _edge_needs_flip(self,p1,e1):
r"""
Return whether the provided edge, which must be incident to two
triangles, should be flipped to get closer to the Delaunay
decomposition.
A ValueError is raised if the edge is not incident to two triangles.
"""
p2,e2=self.opposite_edge(p1,e1)
poly1=self.polygon(p1)
poly2=self.polygon(p2)
if poly1.num_edges()!=3 or poly2.num_edges()!=3:
raise ValueError("Edge must be adjacent to two triangles.")
from flatsurf.geometry.matrix_2x2 import similarity_from_vectors
sim1=similarity_from_vectors(poly1.edge(e1+2),-poly1.edge(e1+1))
sim2=similarity_from_vectors(poly2.edge(e2+2),-poly2.edge(e2+1))
sim=sim1*sim2
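# The lower-left entry of the composed similarity is negative exactly when
# the edge fails the local Delaunay condition, i.e. when it should be flipped.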
return sim[1][0]<0
def _edge_needs_join(self,p1,e1):
r"""
Return whether the two polygons adjacent to the provided edge should be
joined when passing to the Delaunay decomposition, i.e. whether their
union is inscribed in a circle.
"""
p2,e2=self.opposite_edge(p1,e1)
poly1=self.polygon(p1)
poly2=self.polygon(p2)
from flatsurf.geometry.matrix_2x2 import similarity_from_vectors
sim1=similarity_from_vectors(poly1.vertex(e1) - poly1.vertex(e1+2),\
-poly1.edge(e1+1))
sim2=similarity_from_vectors(poly2.vertex(e2) - poly2.vertex(e2+2),\
-poly2.edge(e2+1))
sim=sim1*sim2
return sim[1][0]==0
def delaunay_single_flip(self):
r"""
Do a single in-place flip of a triangulated mutable surface. Return
True if a flip was performed and False if no edge needed to be flipped.
"""
if not self.is_finite():
raise NotImplementedError("Not implemented for infinite surfaces.")
lc = self._label_comparator()
for (l1,e1),(l2,e2) in self.edge_iterator(gluings=True):
if (lc.lt(l1,l2) or (l1==l2 and e1<=e2)) and self._edge_needs_flip(l1,e1):
self.triangle_flip(l1, e1, in_place=True)
return True
return False
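# Illustrative sketch (not a doctest): on a finite, mutable, triangulated
# surface ``s``, repeating single flips until none is needed yields a
# Delaunay triangulation:
#
#     while s.delaunay_single_flip():
#         pass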
def is_delaunay_triangulated(self, limit=None):
r"""
Return whether the surface is triangulated and the triangulation is Delaunay.
If limit is set, then the check is performed on at most limit many edges.
A limit must be set for infinite surfaces.
"""
if limit is None:
if not self.is_finite():
raise NotImplementedError("A limit must be set for infinite surfaces.")
limit = self.num_edges()
count = 0
for (l1,e1),(l2,e2) in self.edge_iterator(gluings=True):
if count >= limit:
break
count = count+1
if self.polygon(l1).num_edges()!=3:
print("Polygon with label "+str(l1)+" is not a triangle.")
return False
if self.polygon(l2).num_edges()!=3:
print("Polygon with label "+str(l2)+" is not a triangle.")
return False
if self._edge_needs_flip(l1,e1):
print("Edge "+str((l1,e1))+" needs to be flipped.")
print("This edge is glued to "+str((l2,e2))+".")
return False
return True
def is_delaunay_decomposed(self, limit=None):
r"""
Return whether the decomposition of the surface into polygons is Delaunay.
If limit is set, then the check is performed on at most limit many polygons.
A limit must be set for infinite surfaces.
"""
if limit is None:
if not self.is_finite():
raise NotImplementedError("A limit must be set for infinite surfaces.")
limit = self.num_polygons()
count = 0
for (l1,p1) in self.label_iterator(polygons=True):
if count >= limit:
break
count = count+1
try:
c1=p1.circumscribing_circle()
except ValueError:
# p1 is not circumscribed
return False
for e1 in range(p1.num_edges()):
c2=self.edge_transformation(l1,e1)*c1
l2,e2=self.opposite_edge(l1,e1)
if c2.point_position(self.polygon(l2).vertex(e2+2))!=-1:
# The circumscribed circle developed into the adjacent polygon
# contains a vertex in its interior or boundary.
return False
return True
def delaunay_triangulation(self, triangulated=False, in_place=False, limit=None, direction=None, relabel=False):
r"""
Return a Delaunay triangulation of the surface, or make some
triangle flips to get closer to the Delaunay decomposition.
INPUT:
- ``triangulated`` (boolean) - If true, the algorithm assumes the
surface is already triangulated. It does this without verification.
- ``in_place`` (boolean) - If true, the triangulating and the
triangle flips are done in place. Otherwise, a mutable copy of the
surface is made.
- ``limit`` (None or Integer) - If None, this will return a
Delaunay triangulation. If limit is an integer 1 or larger, then at
most limit many diagonal flips will be done.
- ``direction`` (None or Vector) - with two entries in the base field
Used to determine labels when a pair of triangles is flipped. Each triangle
has a unique separatrix which points in the provided direction or its
negation. As such a vector determines a sign for each triangle.
A pair of adjacent triangles have opposite signs. Labels are chosen
so that this sign is preserved (as a function of labels).
- ``relabel`` (boolean) - If in_place is False, then a copy must be
made. By default relabel is False and labels will be respected by
this copy. If relabel is True then polygons will be reindexed in an
arbitrary way by the non-negative integers.
EXAMPLES::
sage: from flatsurf import *
sage: from flatsurf.geometry.delaunay import *
sage: m = matrix([[2,1],[1,1]])
sage: s = m*translation_surfaces.infinite_staircase()
sage: ss = s.delaunay_triangulation(relabel=True)
sage: ss.base_label()
0
sage: ss.polygon(0)
Polygon: (0, 0), (1, 1), (0, 1)
sage: TestSuite(ss).run(skip="_test_pickling")
sage: ss.is_delaunay_triangulated(limit=10)
True
"""
if not self.is_finite() and limit is None:
if in_place:
raise ValueError("in_place delaunay triangulation is not possible for infinite surfaces unless a limit is set.")
if self.underlying_surface().is_mutable():
raise ValueError("delaunay_triangulation only works on infinite "+\
"surfaces if they are immutable or if a limit is set.")
from flatsurf.geometry.delaunay import LazyDelaunayTriangulatedSurface
return self.__class__(LazyDelaunayTriangulatedSurface( \
self,direction=direction, relabel=relabel))
if in_place and not self.is_mutable():
raise ValueError("in_place delaunay_triangulation only defined for mutable surfaces")
if triangulated:
if in_place:
s=self
else:
s=self.copy(mutable=True, relabel=False)
else:
if in_place:
s=self
self.triangulate(in_place=True)
else:
s=self.copy(relabel=True,mutable=True)
s.triangulate(in_place=True)
loop=True
if direction is None:
base_ring = self.base_ring()
direction = self.vector_space()( (base_ring.zero(), base_ring.one()) )
else:
assert not direction.is_zero()
if s.is_finite() and limit is None:
from collections import deque
unchecked_labels=deque(label for label in s.label_iterator())
checked_labels = set()
while unchecked_labels:
label = unchecked_labels.popleft()
flipped=False
for edge in range(3):
if s._edge_needs_flip(label,edge):
# Record the current opposite edge:
label2,edge2=s.opposite_edge(label,edge)
# Perform the flip.
s.triangle_flip(label, edge, in_place=True, direction=direction)
# Move the opposite polygon to the list of labels we need to check.
if label2 != label:
try:
checked_labels.remove(label2)
unchecked_labels.append(label2)
except KeyError:
# Occurs if label2 is not in checked_labels
pass
flipped=True
break
if flipped:
unchecked_labels.append(label)
else:
checked_labels.add(label)
return s
else:
# Old method for infinite surfaces, or limits.
count=0
lc = self._label_comparator()
while loop:
loop=False
for (l1,e1),(l2,e2) in s.edge_iterator(gluings=True):
if (lc.lt(l1,l2) or (l1==l2 and e1<=e2)) and s._edge_needs_flip(l1,e1):
s.triangle_flip(l1, e1, in_place=True, direction=direction)
count += 1
if not limit is None and count>=limit:
return s
loop=True
break
return s
def delaunay_single_join(self):
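r"""
Do a single in-place join of two polygons of a mutable surface across an
edge for which the union of the two incident polygons is inscribed in a
circle, so that the decomposition stays Delaunay. Return True if a join
was performed and False otherwise.
"""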
if not self.is_finite():
raise NotImplementedError("Not implemented for infinite surfaces.")
lc = self._label_comparator()
for (l1,e1),(l2,e2) in self.edge_iterator(gluings=True):
if (lc.lt(l1,l2) or (l1==l2 and e1<=e2)) and self._edge_needs_join(l1,e1):
self.join_polygons(l1, e1, in_place=True)
return True
return False
def delaunay_decomposition(self, triangulated=False, \
delaunay_triangulated=False, in_place=False, direction=None,\
relabel=False):
r"""
Return the Delaunay Decomposition of this surface.
INPUT:
- ``triangulated`` (boolean) - If true, the algorithm assumes the
surface is already triangulated. It does this without verification.
- ``delaunay_triangulated`` (boolean) - If true, the algorithm assumes
the surface is already delaunay_triangulated. It does this without
verification.
- ``in_place`` (boolean) - If true, the triangulating and the triangle
flips are done in place. Otherwise, a mutable copy of the surface is
made.
- ``relabel`` (boolean) - If in_place is False, then a copy
must be made of the surface. If relabel is False (as default), the
copy has the same labels as the original surface. Note that in this
case, labels will be added if it is necessary to subdivide polygons
into triangles. If relabel is True, the new surface will have
polygons labeled by the non-negative integers in an arbitrary way.
- ``direction`` - (None or Vector with two entries in the base field) -
Used to determine labels when a pair of triangles is flipped. Each triangle
has a unique separatrix which points in the provided direction or its
negation. As such a vector determines a sign for each triangle.
A pair of adjacent triangles have opposite signs. Labels are chosen
so that this sign is preserved (as a function of labels).
EXAMPLES::
sage: from flatsurf import *
sage: s0 = translation_surfaces.octagon_and_squares()
sage: a = s0.base_ring().gens()[0]
sage: m = Matrix([[1,2+a],[0,1]])
sage: s = m*s0
sage: s = s.triangulate()
sage: ss = s.delaunay_decomposition(triangulated=True)
sage: ss.num_polygons()
3
sage: p = polygons((4,0),(-2,1),(-2,-1))
sage: s0 = similarity_surfaces.self_glued_polygon(p)
sage: s = s0.delaunay_decomposition()
sage: TestSuite(s).run()
sage: m = matrix([[2,1],[1,1]])
sage: s = m*translation_surfaces.infinite_staircase()
sage: ss = s.delaunay_decomposition()
sage: ss.base_label()
0
sage: ss.polygon(0)
Polygon: (0, 0), (1, 0), (1, 1), (0, 1)
sage: TestSuite(ss).run(skip="_test_pickling")
sage: ss.is_delaunay_decomposed(limit=10)
True
"""
if not self.is_finite():
if in_place:
raise ValueError("in_place delaunay_decomposition is not possible for infinite surfaces.")
if self.underlying_surface().is_mutable():
raise ValueError("delaunay_decomposition only works on infinite "+\
"surfaces if they are immutable.")
from flatsurf.geometry.delaunay import LazyDelaunaySurface
return self.__class__(LazyDelaunaySurface( \
self,direction=direction, relabel=relabel))
if in_place:
s=self
else:
s=self.copy(mutable=True, relabel=relabel)
if not delaunay_triangulated:
s.delaunay_triangulation(triangulated=triangulated, in_place=True, \
direction=direction)
# Now s is Delaunay Triangulated
loop=True
lc = self._label_comparator()
while loop:
loop=False
for (l1,e1),(l2,e2) in s.edge_iterator(gluings=True):
if (lc.lt(l1,l2) or (l1==l2 and e1<=e2)) and s._edge_needs_join(l1,e1):
s.join_polygons(l1, e1, in_place=True)
loop=True
break
return s
def saddle_connections(self, squared_length_bound, initial_label=None, initial_vertex=None, sc_list=None, check=False):
r"""
Return a list of saddle connections on the surface whose length squared is less than or equal to squared_length_bound.
The length of a saddle connection is measured using holonomy from the polygon in which the trajectory starts.
If initial_label and initial_vertex are not provided, we return all saddle connections satisfying the bound condition.
If initial_label and initial_vertex are provided, it only provides saddle connections emanating from the corresponding
vertex of a polygon. If only initial_label is provided, the added saddle connections will only emanate from the
corresponding polygon.
If sc_list is provided the found saddle connections are appended to this list and the resulting list is returned.
If check==True it uses the checks in the SaddleConnection class to sanity check our results.
EXAMPLES::
sage: from flatsurf import *
sage: s = translation_surfaces.square_torus()
sage: sc_list = s.saddle_connections(13, check=True)
sage: len(sc_list)
32
"""
assert squared_length_bound > 0
if sc_list is None:
sc_list = []
if initial_label is None:
assert self.is_finite()
assert initial_vertex is None, "If initial_label is not provided, then initial_vertex must not be provided either."
for label in self.label_iterator():
self.saddle_connections(squared_length_bound, initial_label=label, sc_list=sc_list)
return sc_list
if initial_vertex is None:
for vertex in range( self.polygon(initial_label).num_edges() ):
self.saddle_connections(squared_length_bound, initial_label=initial_label, initial_vertex=vertex, sc_list=sc_list)
return sc_list
# Now we have a specified initial_label and initial_vertex
SG = SimilarityGroup(self.base_ring())
start_data = (initial_label, initial_vertex)
circle = Circle(self.vector_space().zero(), squared_length_bound, base_ring = self.base_ring())
p = self.polygon(initial_label)
v = p.vertex(initial_vertex)
last_sim = SG(-v[0],-v[1])
# First check the edge emanating rightward from the start_vertex.
e = p.edge(initial_vertex)
if e[0]**2 + e[1]**2 <= squared_length_bound:
sc_list.append( SaddleConnection(self, start_data, e) )
# Represents the bounds of the beam of trajectories we are sending out.
wedge = ( last_sim( p.vertex((initial_vertex+1)%p.num_edges()) ),
last_sim( p.vertex((initial_vertex+p.num_edges()-1)%p.num_edges()) ))
# This will collect the data we need for a depth first search.
chain = [(last_sim, initial_label, wedge, [(initial_vertex+p.num_edges()-i)%p.num_edges() for i in range(2,p.num_edges())])]
while len(chain)>0:
# Should verts really be edges?
sim, label, wedge, verts = chain[-1]
if len(verts) == 0:
chain.pop()
continue
vert = verts.pop()
#print("Inspecting "+str(vert))
p = self.polygon(label)
# First check the vertex
vert_position = sim(p.vertex(vert))
#print(wedge[1].n())
if wedge_product(wedge[0], vert_position) > 0 and \
wedge_product(vert_position, wedge[1]) > 0 and \
vert_position[0]**2 + vert_position[1]**2 <= squared_length_bound:
sc_list.append( SaddleConnection(self, start_data, vert_position,
end_data = (label,vert),
end_direction = ~sim.derivative()*-vert_position,
holonomy = vert_position,
end_holonomy = ~sim.derivative()*-vert_position,
check = check) )
# Now check if we should develop across the edge
vert_position2 = sim(p.vertex( (vert+1)%p.num_edges() ))
if wedge_product(vert_position,vert_position2)>0 and \
wedge_product(wedge[0],vert_position2)>0 and \
wedge_product(vert_position,wedge[1])>0 and \
circle.line_segment_position(vert_position, vert_position2)==1:
if wedge_product(wedge[0], vert_position) > 0:
# First in new_wedge should be vert_position
if wedge_product(vert_position2, wedge[1]) > 0:
new_wedge = (vert_position, vert_position2)
else:
new_wedge = (vert_position, wedge[1])
else:
if wedge_product(vert_position2, wedge[1]) > 0:
new_wedge = (wedge[0], vert_position2)
else:
new_wedge=wedge
new_label, new_edge = self.opposite_edge(label, vert)
new_sim = sim*~self.edge_transformation(label,vert)
p = self.polygon(new_label)
chain.append( (new_sim, new_label, new_wedge, [(new_edge+p.num_edges()-i)%p.num_edges() for i in range(1,p.num_edges())]) )
return sc_list
def set_default_graphical_surface(self, graphical_surface):
r"""
Replace the default graphical surface with the provided GraphicalSurface.
"""
from flatsurf.graphical.surface import GraphicalSurface
if not isinstance(graphical_surface, GraphicalSurface):
raise ValueError("graphical_surface must be a GraphicalSurface")
if self != graphical_surface.get_surface():
raise ValueError("The provided graphical_surface renders a different surface!")
self._gs = graphical_surface
def graphical_surface(self, *args, **kwds):
r"""
Return a GraphicalSurface representing this surface.
By default this returns a cached version of the GraphicalSurface. If
``cached=False`` is provided as a keyword option then a new
GraphicalSurface is returned. Other keyword options:
INPUT:
- ``cached`` -- a boolean (default ``True``). If true return a cached
GraphicalSurface. Otherwise we make a new one.
- ``polygon_labels`` -- a boolean (default ``True``) whether the labels
of the polygons are displayed
- ``edge_labels`` -- option to control the display of edge labels. It
can be one of
- ``False`` or ``None`` for no labels
- ``'gluings'`` -- to put on each side of each non-adjacent edge, the
name of the polygon to which it is glued
- ``'number'`` -- to put on each side of each edge the number of the
edge
- ``'gluings and numbers'`` -- full information
- ``'letter'`` -- add matching letters to glued edges in an arbitrary way
- ``default_position_function`` -- a function mapping polygon labels to
similarities describing the position of the corresponding polygon.
EXAMPLES:
Test the difference between the cached graphical_surface and the uncached version::
sage: from flatsurf import *
sage: s = translation_surfaces.octagon_and_squares()
sage: s.plot() # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 32 graphics primitives
sage: s.graphical_surface(cached=False,adjacencies=[]).plot() # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 18 graphics primitives
"""
from flatsurf.graphical.surface import GraphicalSurface
if "cached" in kwds:
if not kwds["cached"]:
# cached=False: return a new surface.
kwds.pop("cached",None)
return GraphicalSurface(self, *args, **kwds)
kwds.pop("cached",None)
if hasattr(self, '_gs'):
self._gs.process_options(*args, **kwds)
else:
self._gs = GraphicalSurface(self, *args, **kwds)
return self._gs
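# Illustrative sketch (added comment, not from the original source): the
# keyword options documented above can be combined when requesting an
# uncached graphical surface, e.g.
#
#     gs = s.graphical_surface(cached=False, polygon_labels=False,
#                              edge_labels='gluings')
#     gs.plot()  # not tested
#
# where ``s`` is any finite surface, as in the doctests above.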
def plot(self, *args, **kwds):
r"""
Returns a plot of the surface.
There may be zero or one argument. If provided, the single argument
should be a GraphicalSurface which will be used in the plot.
INPUT:
- ``polygon_labels`` -- a boolean (default ``True``) whether the labels
of the polygons are displayed
- ``edge_labels`` -- option to control the display of edge labels. It
can be one of
- ``False`` or ``None`` for no labels
- ``'gluings'`` -- to put on each side of each non-adjacent edge, the
name of the polygon to which it is glued
- ``'number'`` -- to put on each side of each edge the number of the
edge
- ``'gluings and number'`` -- full information
- ``adjacencies`` -- a list of pairs ``(p,e)`` to be used to set
adjacencies of polygons.
- ``default_position_function`` -- a function mapping polygon labels to
similarities describing the position of the corresponding polygon.
"""
if len(args) > 1:
raise ValueError("SimilaritySurface.plot() can take at most one non-keyword argument.")
if len(args)==1:
from flatsurf.graphical.surface import GraphicalSurface
if not isinstance(args[0], GraphicalSurface):
raise ValueError("If an argument is provided, it must be a GraphicalSurface.")
gs = args[0]
gs.process_options(**kwds)
else:
gs = self.graphical_surface(**kwds)
return gs.plot()
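# Illustrative sketch (added comment, assumption rather than original code):
# a pre-configured GraphicalSurface can be passed to ``plot`` so that the
# same rendering options are reused,
#
#     gs = s.graphical_surface(cached=False)
#     gs.process_options(edge_labels='number')
#     s.plot(gs)  # not tested
#
# with ``s`` a finite surface as in the examples above.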
def plot_polygon(self, label, graphical_surface = None,
plot_polygon = True, plot_edges = True, plot_edge_labels = True,
edge_labels = None,
polygon_options = {"axes":True}, edge_options = None, edge_label_options = None):
r"""
Returns a plot of the polygon with the provided label.
Note that this method plots the polygon in its coordinates as opposed to
graphical coordinates that the :func:`plot` method uses. This makes it useful
for visualizing the natural coordinates of the polygon.
INPUT:
- ``graphical_surface`` -- (default ``None``) If provided this function pulls graphical options
from the graphical surface. If not provided, we use the default graphical surface.
- ``plot_polygon`` -- (default ``True``) If True, we plot the solid polygon.
- ``polygon_options`` -- (default ``{"axes":True}``) Options for the rendering of the polygon.
These options will be passed to :func:`~flatsurf.graphical.polygon.GraphicalPolygon.plot_polygon`.
This should be either None or a dictionary.
- ``plot_edges`` -- (default ``True``) If True, we plot the edges of the polygon as segments.
- ``edge_options`` -- (default ``None``) Options for the rendering of the polygon edges.
These options will be passed to :func:`~flatsurf.graphical.polygon.GraphicalPolygon.plot_edge`.
This should be either None or a dictionary.
- ``plot_edge_labels`` -- (default ``True``) If True, we plot labels on the edges.
- ``edge_label_options`` -- (default ``None``) Options for the rendering of the edge labels.
These options will be passed to :func:`~flatsurf.graphical.polygon.GraphicalPolygon.plot_edge_label`.
This should be either None or a dictionary.
- ``edge_labels`` -- (default ``None``) If None and plot_edge_labels is True, we write the edge
number on each edge. Otherwise edge_labels should be a list of strings of length equal to the
number of edges of the polygon. The strings will be printed on each edge.
EXAMPLES::
sage: from flatsurf import *
sage: s = similarity_surfaces.example()
sage: s.plot() # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 13 graphics primitives
sage: s.plot_polygon(1) # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 7 graphics primitives
sage: labels = []
sage: p = s.polygon(1)
sage: for e in range(p.num_edges()): \
labels.append(str(p.edge(e)))
sage: s.plot_polygon(1, polygon_options=None, plot_edges=False, \
edge_labels=labels, edge_label_options={"color":"red"}) # not tested (problem with matplotlib font caches on Travis)
Graphics object consisting of 4 graphics primitives
"""
if graphical_surface is None:
graphical_surface = self.graphical_surface()
p = self.polygon(label)
from flatsurf.graphical.polygon import GraphicalPolygon
gp = GraphicalPolygon(p)
if plot_polygon:
if polygon_options is None:
o = graphical_surface.polygon_options
else:
o = graphical_surface.polygon_options.copy()
o.update(polygon_options)
plt = gp.plot_polygon(**o)
if plot_edges:
if edge_options is None:
o = graphical_surface.non_adjacent_edge_options
else:
o = graphical_surface.non_adjacent_edge_options.copy()
o.update(edge_options)
for e in range(p.num_edges()):
plt += gp.plot_edge(e, **o)
if plot_edge_labels:
if edge_label_options is None:
o = graphical_surface.edge_label_options
else:
o = graphical_surface.edge_label_options.copy()
o.update(edge_label_options)
for e in range(p.num_edges()):
if edge_labels is None:
el = str(e)
else:
el = edge_labels[e]
plt += gp.plot_edge_label(e, el, **o)
return plt
# I'm not sure we want to support this...
#
# def minimize_monodromy_mapping(self):
# r"""
# Return a mapping from this surface to a similarity surface
# with a minimal monodromy group.
# Note that this may be slow for infinite surfaces.
#
# EXAMPLES::
# sage: from flatsurf.geometry.polygon import ConvexPolygons
# sage: K.<sqrt2> = NumberField(x**2 - 2, embedding=1.414)
# sage: octagon = ConvexPolygons(K)([(1,0),(sqrt2/2, sqrt2/2),(0, 1),(-sqrt2/2, sqrt2/2),(-1,0),(-sqrt2/2, -sqrt2/2),(0, -1),(sqrt2/2, -sqrt2/2)])
# sage: square = ConvexPolygons(K)([(1,0),(0,1),(-1,0),(0,-1)])
# sage: gluings = [((0,i),(1+(i%2),i//2)) for i in range(8)]
# sage: from flatsurf.geometry.surface import surface_from_polygons_and_gluings
# sage: s=surface_from_polygons_and_gluings([octagon,square,square],gluings)
# sage: print s
# Rational cone surface built from 3 polygons
# sage: m=s.minimize_monodromy_mapping()
# sage: s2=m.codomain()
# sage: print s2
# Translation surface built from 3 polygons
# sage: v=s.tangent_vector(2,(0,0),(1,0))
# sage: print m.push_vector_forward(v)
# SimilaritySurfaceTangentVector in polygon 2 based at (0, 0) with vector (-1/2*sqrt2, -1/2*sqrt2)
# sage: w=s2.tangent_vector(2,(0,0),(0,-1))
# sage: print m.pull_vector_back(w)
# SimilaritySurfaceTangentVector in polygon 2 based at (0, 0) with vector (1/2*sqrt2, 1/2*sqrt2)
# """
# lw = self.walker()
# class MatrixFunction:
# def __init__(self, lw):
# self._lw=lw
# from sage.matrix.constructor import identity_matrix
# self._d = {lw.surface().base_label():
# identity_matrix(lw.surface().base_ring(), n=2)}
# def __call__(self, label):
# try:
# return self._d[label]
# except KeyError:
# e = self._lw.edge_back(label)
# label2,e2 = self._lw.surface().opposite_edge(label,e)
# m=self._lw.surface().edge_matrix(label,e) * self(label2)
# self._d[label]=m
# return m
# mf = MatrixFunction(lw)
# from flatsurf.geometry.mappings import (
# MatrixListDeformedSurfaceMapping,
# IdentityMapping)
# mapping = MatrixListDeformedSurfaceMapping(self, mf)
# surface_type = mapping.codomain().compute_surface_type_from_gluings(limit=100)
# new_codomain = convert_to_type(mapping.codomain(),surface_type)
# identity = IdentityMapping(mapping.codomain(), new_codomain)
# return identity * mapping
#
# def minimal_monodromy_surface(self):
# r"""
# Return an equivalent similarity surface with minimal monodromy.
# Note that this may be slow for infinite surfaces.
#
# EXAMPLES::
# sage: from flatsurf.geometry.polygon import ConvexPolygons
# sage: K.<sqrt2> = NumberField(x**2 - 2, embedding=1.414)
# sage: octagon = ConvexPolygons(K)([(1,0),(sqrt2/2, sqrt2/2),(0, 1),(-sqrt2/2, sqrt2/2),(-1,0),(-sqrt2/2, -sqrt2/2),(0, -1),(sqrt2/2, -sqrt2/2)])
# sage: square = ConvexPolygons(K)([(1,0),(0,1),(-1,0),(0,-1)])
# sage: gluings = [((0,i),(1+(i%2),i//2)) for i in range(8)]
# sage: from flatsurf.geometry.surface import surface_from_polygons_and_gluings
# sage: s=surface_from_polygons_and_gluings([octagon,square,square],gluings)
# sage: print s
# Rational cone surface built from 3 polygons
# sage: s2=s.minimal_monodromy_surface()
# sage: print s2
# Translation surface built from 3 polygons
# """
# return self.minimize_monodromy_mapping().codomain()
def __eq__(self, other):
r"""
Implements a naive notion of equality where two finite surfaces are equal if:
- their base labels are equal,
- their polygons are equal and labeled and glued in the same way.
For infinite surfaces we use reference equality.
Raises a ``ValueError`` if the surfaces are defined over different rings.
"""
if not self.is_finite():
return self is other
if self is other:
return True
if not isinstance(other, SimilaritySurface):
raise TypeError
if not other.is_finite():
raise ValueError("Can not compare infinite surfaces.")
if self.base_ring() != other.base_ring():
raise ValueError("Refusing to compare surfaces with different base rings.")
if not self.is_mutable() and not other.is_mutable():
hash1 = hash(self)
hash2 = hash(other)
if hash1 != hash2:
return False
if self.base_label() != other.base_label():
return False
if self.num_polygons() != other.num_polygons():
return False
for label,polygon in self.label_iterator(polygons=True):
try:
polygon2 = other.polygon(label)
except ValueError:
return False
if polygon != polygon2:
return False
for edge in range(polygon.num_edges()):
if self.opposite_edge(label,edge) != other.opposite_edge(label,edge):
return False
return True
def __ne__(self, other):
return not self == other
def __hash__(self):
r"""
Hash compatible with equals.
"""
if self._s.is_mutable():
raise ValueError("Attempting to hash with mutable underlying surface.")
if hasattr(self, '_hash'):
# Return the cached hash.
return self._hash
# Compute the hash
h = 17*hash(self.base_ring())+23*hash(self.base_label())
for pair in self.label_iterator(polygons=True):
h = h + 7*hash(pair)
for edgepair in self.edge_iterator(gluings=True):
h = h + 3*hash(edgepair)
self._hash=h
return h
def erase_marked_points(self):
r"""
Return an isometric or similar surface with no regular vertices of angle
2pi (except in the torus case).
EXAMPLES::
sage: import flatsurf
sage: G = SymmetricGroup(4)
sage: S = flatsurf.translation_surfaces.origami(G('(1,2,3,4)'), G('(1,4,2,3)'))
sage: S.stratum()
H_2(2, 0)
sage: S.erase_marked_points().stratum()
H_2(2)
sage: for (a,b,c) in [(1,4,11), (1,4,15), (3,4,13)]:
....: T = flatsurf.polygons.triangle(a,b,c)
....: S = flatsurf.similarity_surfaces.billiard(T)
....: S = S.minimal_cover("translation")
....: print(S.erase_marked_points().stratum())
H_6(10)
H_6(2^5)
H_8(12, 2)
"""
if self.is_triangulated():
surface = self
else:
surface = self.triangulate()
surface = surface.copy(relabel=True)
angles = [adj for a,adj in surface.angles(return_adjacent_edges=True) if a == 1]
C = ConvexPolygons(self.base_ring())
V = VectorSpace(self.base_ring(), 2)
while angles:
# remove the vertex corresponding to angles[-1]
adj = angles.pop()
assert surface.is_triangulated()
n = len(adj)
moved_triangles = sorted(set(p for p,_ in adj))
if len(moved_triangles) != n:
# we have an edge from the regular point to itself
# this is an edge inside a cylinder
# TODO: treat this case via
# 1. try a flip
# 2. try harder
raise NotImplementedError
# glue together the triangles adjacent in adj to form a (not necessarily
# convex) polygon
a = 1
b = 0
boundary = [] # pairs (p,e) of edges on the boundary
boundary_inv = {} # inverse map (p,e) -> index in boundary
vertices = [] # vertices of the modified polygon
for num,(p,e) in enumerate(adj):
P = surface.polygon(p)
x,y = P.edge(e)
vertices.append(V((x, y)))
f = (e + 1) % 3
boundary.append((p, f))
boundary_inv[(p, f)] = num
e = (e - 1) % 3
sim = surface.edge_transformation(p, e)
if sim._a != 1 or sim._b != 0:
raise NotImplementedError("only translation surfaces are supported for now")
assert a == 1 and b == 0
# triangulate this polygon
edges = triangulate(vertices)
combinatorial_triangles = build_faces(len(vertices), edges)
assert len(combinatorial_triangles) == len(moved_triangles) - 2
new_triangles = []
edge_to_lab = {}
for num,(i,j,k) in enumerate(combinatorial_triangles):
new_triangles.append(C(vertices=[vertices[i], vertices[j], vertices[k]]))
assert (i,j) not in edge_to_lab
assert (j,k) not in edge_to_lab
assert (k,i) not in edge_to_lab
edge_to_lab[(i,j)] = edge_to_lab[(j,k)] = edge_to_lab[(k,i)] = num
# build a new surface
S = Surface_list(surface.base_ring())
for p in surface.label_iterator():
if p not in moved_triangles:
S.add_polygon(surface.polygon(p), label=p)
for p,T in zip(moved_triangles, new_triangles):
S.add_polygon(T, label=p)
# glue edges away from the modified zone
for p in surface.label_iterator():
if p in moved_triangles:
continue
for e in range(3):
pp,ee = surface.opposite_edge(p, e)
if pp not in moved_triangles:
S.set_edge_pairing(p, e, pp, ee)
# glue edges of the modified zone
for p, tverts in zip(moved_triangles, combinatorial_triangles):
for e in range(3):
i = tverts[e]
j = tverts[(e+1)%3]
if j == (i+1)%n:
# boundary edge
pold,eold = boundary[i]
ppold, eeold = surface.opposite_edge(pold, eold)
if (ppold,eeold) in boundary_inv:
# glued to another triangle of the modified zone
ii = boundary_inv[ppold,eeold]
jj = (ii+1)%n
num = edge_to_lab[ii,jj]
pp = moved_triangles[num]
t = combinatorial_triangles[num]
ee = t.index(ii)
S.set_edge_pairing(p, e, pp, ee)
else:
# glued to an old triangle
pp = ppold
ee = eeold
S.set_edge_pairing(p, e, pp, ee)
else:
# internal edge
num = edge_to_lab[j,i]
pp = moved_triangles[num]
ee = combinatorial_triangles[num].index(j)
S.set_edge_pairing(p, e, pp, ee)
surface = type(self)(S)
from sage.misc.sage_unittest import TestSuite
TestSuite(surface).run()
angles = [adj for a,adj in surface.angles(return_adjacent_edges=True) if a == 1]
return surface
|
gpl-2.0
|
raincoatrun/ThinkStats2
|
code/hinc.py
|
67
|
1494
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import thinkplot
import thinkstats2
def Clean(s):
"""Converts dollar amounts to integers."""
try:
return int(s.lstrip('$').replace(',', ''))
except ValueError:
if s == 'Under':
return 0
elif s == 'over':
return np.inf
return None
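# Worked examples for Clean (added comments; values follow directly from the
# code above):
#   Clean('$10,000') -> 10000
#   Clean('Under')   -> 0
#   Clean('over')    -> np.inf
#   Clean('foo')     -> None   (any other non-numeric token)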
def ReadData(filename='hinc06.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
data = pandas.read_csv(filename, header=None, skiprows=9)
cols = data[[0, 1]]
res = []
for _, row in cols.iterrows():
label, freq = row.values
freq = int(freq.replace(',', ''))
t = label.split()
low, high = Clean(t[0]), Clean(t[-1])
res.append((high, freq))
df = pandas.DataFrame(res)
# correct the first range
df[0][0] -= 1
# compute the cumulative sum of the freqs
df[2] = df[1].cumsum()
# normalize the cumulative freqs
total = df[2][41]
df[3] = df[2] / total
# add column names
df.columns = ['income', 'freq', 'cumsum', 'ps']
return df
def main():
df = ReadData()
print(df)
if __name__ == "__main__":
main()
|
gpl-3.0
|
hochthom/kaggle-taxi-ii
|
src/mk_submission_Experts.py
|
2
|
1994
|
import os
import time
import numpy as np
import cPickle as pickle
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.cross_validation import ShuffleSplit
from utils import haversineKaggle, rmse, CITY_CENTER
t0 = time.time()
df = pd.read_csv('../data/test_pp_RND.csv')
df = df.drop(['TRIP_ID', 'CALL_TYPE', 'TAXI_ID'], axis = 1)
X_tst = np.array(df, dtype=np.float)
pred = {}
for id_ in range(320):
filename = '../data/train_pp_TST_%i.csv' % id_
if not os.path.isfile(filename):
continue
df = pd.read_csv(filename)
if df.shape[0] < 1000:
print('skipping key point %i (%i)' % (id_, df.shape[0]))
continue
# factorize categorical columns in training set
#df['CALL_TYPE'], ct_index = pd.factorize(df['CALL_TYPE'])
#df = df[df['CALL_TYPE'] == 0] # A=2, B=1, C=0
# fill all NaN values with -1
#df = df.fillna(-1)
# remove long distance
d1 = haversineKaggle(df[['xs', 'ys']], df[['xe', 'ye']])
th1 = np.percentile(d1, [99.9])
df = df.loc[d1 < th1]
y = np.ravel(np.log(df['len']*15 + 1))
df.drop(['CALL_TYPE', 'TAXI_ID', 'xe', 'ye', 'len'], axis=1, inplace=True)
X = np.array(df, dtype=np.float)
print('training classifier of key point %i (sz=%i) ...' % (id_, X.shape[0]))
# Initialize the famous Random Forest Regressor from scikit-learn
clf = RandomForestRegressor(n_estimators=200, n_jobs=-1, random_state=21)
clf.fit(X, y)
pred_rf = clf.predict(X_tst[id_, :])
clf = GradientBoostingRegressor(n_estimators=200, max_depth=3, random_state=21)
clf.fit(X, y)
pred_gb = clf.predict(X_tst[id_, :])
#print 'predicting test data ...'
pred[id_] = {'rfr':pred_rf, 'gbr':pred_gb, 'size':X.shape[0]}
with open('predictions_TVT_experts.pkl', 'wb') as fp:
pickle.dump(pred, fp, -1)
print('Done in %.1f sec.' % (time.time() - t0))
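# Illustrative sketch (added comment, not part of the original script): the
# pickled expert predictions written above can be reloaded later with the
# same cPickle import, e.g.
#
#     with open('predictions_TVT_experts.pkl', 'rb') as fp:
#         pred = pickle.load(fp)
#     # pred[id_] is a dict with keys 'rfr', 'gbr' and 'size'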
|
mit
|
PROSIC/PROSIC
|
prosic/TernaryClassification.py
|
1
|
2691
|
#!/usr/bin/env python
from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
__author__ = "Louis Dijkstra"
"""
Contains the functionality needed to plot the results of a ternary classification task,
e.g., somatic/germline/not present or absent/heterozygous/homozygous.
"""
def normalizeTable(table):
"""Normalizes the rows in a 3x3 table, i.e., every row sums up to 100%."""
for row in [0,1,2]:
row_total = 0.0
for column in [0,1,2]: # compute row total
row_total += table[row][column]
if row_total != 0:
for column in [0,1,2]: # normalize
table[row][column] = table[row][column] / row_total * 100.0
return table
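# Worked example for normalizeTable (added comment; arithmetic follows from
# the code above):
#   normalizeTable([[1, 1, 2], [0, 0, 0], [2, 3, 5]])
#   -> [[25.0, 25.0, 50.0], [0, 0, 0], [20.0, 30.0, 50.0]]
# Rows summing to zero are left untouched; every other row is rescaled to 100%.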
def returnFancyIntervalString(interval):
"""Returns the interval in the form of a string. Suitable for plotting."""
if interval[0] is None:
if interval[1] is None: # unbounded
return r".-."
else: # <= interval[1]
return r"$\leq$" + str(interval[1])
elif interval[1] is None: # >= interval[0]
return r"$\geq$" + str(interval[0])
return str(interval[0]) + '-' + str(interval[1])
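# Worked examples for returnFancyIntervalString (added comments):
#   returnFancyIntervalString((None, None)) -> '.-.'
#   returnFancyIntervalString((None, 10))   -> '$\leq$10'
#   returnFancyIntervalString((10, None))   -> '$\geq$10'
#   returnFancyIntervalString((5, 10))      -> '5-10'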
def plotTernaryClassification (class1, class2, class3, length_ranges, class_names = ['not present', 'heterozygous', 'homozygous'], width = .9):
layer1, layer2, layer3, labels = [], [], [], []
for i in range(len(length_ranges)):
layer1.append(class1[i][0])
layer2.append(class1[i][1])
layer3.append(class1[i][2])
labels.append(returnFancyIntervalString(length_ranges[i]))
for i in [0]:
layer1.append(0)
layer2.append(0)
layer3.append(0)
labels.append('')
for i in range(len(length_ranges)):
layer1.append(class2[i][0])
layer2.append(class2[i][1])
layer3.append(class2[i][2])
labels.append(returnFancyIntervalString(length_ranges[i]))
for i in [0]:
layer1.append(0)
layer2.append(0)
layer3.append(0)
labels.append('')
for i in range(len(length_ranges)):
layer1.append(class3[i][0])
layer2.append(class3[i][1])
layer3.append(class3[i][2])
labels.append(returnFancyIntervalString(length_ranges[i]))
ind = np.arange(len(labels))
bottom = layer1
plot_layer1 = plt.bar(ind, layer1, width, color = 'b')
plot_layer2 = plt.bar(ind, layer2, width, color = 'r', bottom = bottom)
for i in range(len(bottom)):
bottom[i] += layer2[i]
plot_layer3 = plt.bar(ind, layer3, width, color = 'g', bottom = bottom)
x_min,x_max,y_min,y_max = plt.axis()
plt.axis((x_min, x_max, 0, 100))
plt.xticks(ind + width / 2.0, labels)
plt.yticks(np.arange(0,101,20), ('0%', '20%', '40%', '60%', '80%', '100%'))
plt.grid(True, axis='y')
plt.legend( (plot_layer1[0], plot_layer2[0], plot_layer3[0]), class_names, loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=3)
plt.show()
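# Minimal usage sketch (added comment, assumption rather than original code):
# each class table holds one 3-entry row of percentages per length range, e.g.
#
#     length_ranges = [(None, 50), (50, 100), (100, None)]
#     class1 = [[90, 5, 5], [80, 10, 10], [70, 20, 10]]
#     class2 = [[10, 80, 10], [15, 70, 15], [20, 60, 20]]
#     class3 = [[5, 15, 80], [10, 20, 70], [15, 25, 60]]
#     plotTernaryClassification(class1, class2, class3, length_ranges)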
|
gpl-3.0
|
alexandrebarachant/mne-python
|
mne/decoding/time_gen.py
|
1
|
64983
|
# Authors: Jean-Remi King <[email protected]>
# Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Clement Moutard <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import copy
from .base import _set_cv
from ..io.pick import _pick_data_channels
from ..viz.decoding import plot_gat_matrix, plot_gat_times
from ..parallel import parallel_func, check_n_jobs
from ..utils import warn, check_version
class _DecodingTime(dict):
"""A dictionary to configure the training times that has the following keys:
'slices' : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
'start' : float
Time at which to start decoding (in seconds).
Defaults to min(epochs.times).
'stop' : float
Maximal time at which to stop decoding (in seconds).
Defaults to max(times).
'step' : float
Duration separating the start of subsequent classifiers (in
seconds). Defaults to one time sample.
'length' : float
Duration of each classifier (in seconds). Defaults to one time sample.
If None, empty dict. """
def __repr__(self):
s = ""
if "start" in self:
s += "start: %0.3f (s)" % (self["start"])
if "stop" in self:
s += ", stop: %0.3f (s)" % (self["stop"])
if "step" in self:
s += ", step: %0.3f (s)" % (self["step"])
if "length" in self:
s += ", length: %0.3f (s)" % (self["length"])
if "slices" in self:
# identify depth: training times only contains n_time but
# testing_times can contain n_times or n_times * m_times
depth = [len(ii) for ii in self["slices"]]
if len(np.unique(depth)) == 1: # if all slices have same depth
if depth[0] == 1: # if depth is one
s += ", n_time_windows: %s" % (len(depth))
else:
s += ", n_time_windows: %s x %s" % (len(depth), depth[0])
else:
s += (", n_time_windows: %s x [%s, %s]" %
(len(depth),
min([len(ii) for ii in depth]),
max(([len(ii) for ii in depth]))))
return "<DecodingTime | %s>" % s
class _GeneralizationAcrossTime(object):
"""Generic object to train and test a series of classifiers at and across
different time samples.
""" # noqa
def __init__(self, picks=None, cv=5, clf=None, train_times=None,
test_times=None, predict_method='predict',
predict_mode='cross-validation', scorer=None,
score_mode='mean-fold-wise', n_jobs=1):
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
# Store parameters in object
self.cv = cv
# Define training sliding window
self.train_times = (_DecodingTime() if train_times is None
else _DecodingTime(train_times))
# Define testing sliding window. If None, will be set in predict()
if test_times is None:
self.test_times = _DecodingTime()
elif test_times == 'diagonal':
self.test_times = 'diagonal'
else:
self.test_times = _DecodingTime(test_times)
# Default classification pipeline
if clf is None:
scaler = StandardScaler()
estimator = LogisticRegression()
clf = Pipeline([('scaler', scaler), ('estimator', estimator)])
self.clf = clf
self.predict_mode = predict_mode
self.scorer = scorer
self.score_mode = score_mode
self.picks = picks
self.predict_method = predict_method
self.n_jobs = n_jobs
def fit(self, epochs, y=None):
"""Train a classifier on each specified time slice.
.. note::
This function sets the ``picks_``, ``ch_names``, ``cv_``,
``y_train``, ``train_times_`` and ``estimators_`` attributes.
Parameters
----------
epochs : instance of Epochs
The epochs.
y : list or ndarray of int, shape (n_samples,) or None, optional
To-be-fitted model values. If None, y = epochs.events[:, 2].
Returns
-------
self : GeneralizationAcrossTime
Returns fitted GeneralizationAcrossTime object.
Notes
-----
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
from sklearn.base import clone
# Clean attributes
for att in ['picks_', 'ch_names', 'y_train_', 'cv_', 'train_times_',
'estimators_', 'test_times_', 'y_pred_', 'y_true_',
'scores_', 'scorer_']:
if hasattr(self, att):
delattr(self, att)
n_jobs = self.n_jobs
# Extract data from MNE structure
X, y, self.picks_ = _check_epochs_input(epochs, y, self.picks)
self.ch_names = [epochs.ch_names[p] for p in self.picks_]
# Prepare cross-validation
self.cv_, self._cv_splits = _set_cv(self.cv, self.clf, X=X, y=y)
self.y_train_ = y
# Get train slices of times
self.train_times_ = _sliding_window(epochs.times, self.train_times,
epochs.info['sfreq'])
# Parallel across training time
# TODO: JRK: Chunking times points needs to be simplified
parallel, p_func, n_jobs = parallel_func(_fit_slices, n_jobs)
n_chunks = min(len(self.train_times_['slices']), n_jobs)
time_chunks = np.array_split(self.train_times_['slices'], n_chunks)
out = parallel(p_func(clone(self.clf),
X[..., np.unique(np.concatenate(time_chunk))],
y, time_chunk, self._cv_splits)
for time_chunk in time_chunks)
# Unpack estimators into time slices X folds list of lists.
self.estimators_ = sum(out, list())
return self
def predict(self, epochs):
"""Classifiers' predictions on each specified testing time slice.
.. note::
This function sets the ``y_pred_`` and ``test_times_`` attributes.
Parameters
----------
epochs : instance of Epochs
The epochs. Can be similar to fitted epochs or not. See
predict_mode parameter.
Returns
-------
y_pred : list of lists of arrays of floats, shape (n_train_t, n_test_t, n_epochs, n_prediction_dims)
The single-trial predictions at each training time and each testing
time. Note that the number of testing times per training time need
not be regular; when it is,
``np.shape(y_pred_) = (n_train_time, n_test_time, n_epochs)``.
""" # noqa
# Check that classifier has predict_method (e.g. predict_proba is not
# always available):
if not hasattr(self.clf, self.predict_method):
raise NotImplementedError('%s does not have "%s"' % (
self.clf, self.predict_method))
# Check that at least one classifier has been trained
if not hasattr(self, 'estimators_'):
raise RuntimeError('Please fit models before trying to predict')
# Check predict mode
if self.predict_mode not in ['cross-validation', 'mean-prediction']:
raise ValueError('predict_mode must be a str, "mean-prediction" '
'or "cross-validation"')
# Check that training cv and predicting cv match
if self.predict_mode == 'cross-validation':
n_est_cv = [len(estimator) for estimator in self.estimators_]
heterogeneous_cv = len(set(n_est_cv)) != 1
mismatch_cv = n_est_cv[0] != len(self._cv_splits)
mismatch_y = len(self.y_train_) != len(epochs)
if heterogeneous_cv or mismatch_cv or mismatch_y:
raise ValueError(
'When predict_mode = "cross-validation", the training '
'and predicting cv schemes must be identical.')
# Clean attributes
for att in ['y_pred_', 'test_times_', 'scores_', 'scorer_', 'y_true_']:
if hasattr(self, att):
delattr(self, att)
_warn_once.clear() # reset self-baked warning tracker
X, y, _ = _check_epochs_input(epochs, None, self.picks_)
if not np.all([len(test) for train, test in self._cv_splits]):
warn('Some folds do not have any test epochs.')
# Define testing sliding window
if self.test_times == 'diagonal':
test_times = _DecodingTime()
test_times['slices'] = [[s] for s in self.train_times_['slices']]
test_times['times'] = [[s] for s in self.train_times_['times']]
elif isinstance(self.test_times, dict):
test_times = copy.deepcopy(self.test_times)
else:
raise ValueError('test_times must be a dict or "diagonal"')
if 'slices' not in test_times:
if 'length' not in self.train_times_.keys():
raise ValueError('Need test_times["slices"] with ad hoc train_times.')
# Check that there are as many time samples in testing as in training
# (otherwise the number of features won't match)
test_times['length'] = test_times.get('length',
self.train_times_['length'])
# Make a sliding window for each training time.
slices_list = list()
for _ in range(len(self.train_times_['slices'])):
test_times_ = _sliding_window(epochs.times, test_times,
epochs.info['sfreq'])
slices_list += [test_times_['slices']]
test_times = test_times_
test_times['slices'] = slices_list
test_times['times'] = [_set_window_time(test, epochs.times)
for test in test_times['slices']]
for train, tests in zip(self.train_times_['slices'],
test_times['slices']):
# The user may define irregular timing. We thus need to ensure
# that the dimensionality of each estimator (i.e. training
# time) corresponds to the dimensionality of each testing time)
if not np.all([len(test) == len(train) for test in tests]):
raise ValueError('train_times and test_times must '
'have identical lengths')
# Store all testing times parameters
self.test_times_ = test_times
n_orig_epochs, _, n_times = X.shape
# Subselects the to-be-predicted epochs so as to manipulate a
# contiguous array X by using slices rather than indices.
test_epochs = []
if self.predict_mode == 'cross-validation':
test_idxs = [ii for train, test in self._cv_splits for ii in test]
start = 0
for _, test in self._cv_splits:
n_test_epochs = len(test)
stop = start + n_test_epochs
test_epochs.append(slice(start, stop, 1))
start += n_test_epochs
X = X[test_idxs]
# Prepare parallel predictions across testing time points
# FIXME Note that this means that TimeDecoding.predict isn't parallel
parallel, p_func, n_jobs = parallel_func(_predict_slices, self.n_jobs)
n_test_slice = max(len(sl) for sl in self.test_times_['slices'])
# Loop across estimators (i.e. training times)
n_chunks = min(n_test_slice, n_jobs)
chunks = [np.array_split(slices, n_chunks)
for slices in self.test_times_['slices']]
chunks = map(list, zip(*chunks))
# To minimize memory during parallelization, we apply some chunking
y_pred = parallel(p_func(
estimators=self.estimators_, cv_splits=self._cv_splits,
predict_mode=self.predict_mode, predict_method=self.predict_method,
n_orig_epochs=n_orig_epochs, test_epochs=test_epochs,
**dict(zip(['X', 'train_times'], _chunk_data(X, chunk))))
for chunk in chunks)
# Concatenate chunks across test time dimension.
n_tests = [len(sl) for sl in self.test_times_['slices']]
if len(set(n_tests)) == 1: # does GAT deal with a regular array/matrix
self.y_pred_ = np.concatenate(y_pred, axis=1)
else:
# Non regular testing times, y_pred is an array of arrays with
# different lengths.
# FIXME: should do this with numpy operators only
self.y_pred_ = [[test for chunk in train for test in chunk]
for train in map(list, zip(*y_pred))]
return self.y_pred_
def score(self, epochs=None, y=None):
"""Score Epochs
Estimate scores across trials by comparing the prediction estimated for
each trial to its true value.
Calls ``predict()`` if it has not been already.
.. note::
The function updates the ``scorer_``, ``scores_``, and
``y_true_`` attributes.
.. note::
If ``predict_mode`` is 'mean-prediction', ``score_mode`` is
automatically set to 'mean-sample-wise'.
Parameters
----------
epochs : instance of Epochs | None, optional
The epochs. Can be similar to fitted epochs or not.
If None, it needs to rely on the predictions ``y_pred_``
generated with ``predict()``.
y : list | ndarray, shape (n_epochs,) | None, optional
True values to be compared with the predictions ``y_pred_``
generated with ``predict()`` via ``scorer_``.
If None and ``predict_mode``=='cross-validation' y = ``y_train_``.
Returns
-------
scores : list of lists of float
The scores estimated by ``scorer_`` at each training time and each
testing time (e.g. mean accuracy of ``predict(X)``). Note that the
number of testing times per training time need not be regular;
when it is, np.shape(scores) = (n_train_time, n_test_time). If
``score_mode`` is 'fold-wise', np.shape(scores) = (n_train_time,
n_test_time, n_folds).
"""
import sklearn.metrics
from sklearn.base import is_classifier
from sklearn.metrics import accuracy_score, mean_squared_error
if check_version('sklearn', '0.17'):
from sklearn.base import is_regressor
else:
def is_regressor(clf):
return False
# Run predictions if not already done
if epochs is not None:
self.predict(epochs)
else:
if not hasattr(self, 'y_pred_'):
raise RuntimeError('Please predict() epochs first or pass '
'epochs to score()')
# Check scorer
if self.score_mode not in ('fold-wise', 'mean-fold-wise',
'mean-sample-wise'):
raise ValueError("score_mode must be 'fold-wise', "
"'mean-fold-wise' or 'mean-sample-wise'. "
"Got %s instead'" % self.score_mode)
score_mode = self.score_mode
if (self.predict_mode == 'mean-prediction' and
self.score_mode != 'mean-sample-wise'):
warn("score_mode changed from %s set to 'mean-sample-wise' because"
" predict_mode is 'mean-prediction'." % self.score_mode)
score_mode = 'mean-sample-wise'
self.scorer_ = self.scorer
if self.scorer_ is None:
# Try to guess which scoring metrics should be used
if self.predict_method == "predict":
if is_classifier(self.clf):
self.scorer_ = accuracy_score
elif is_regressor(self.clf):
self.scorer_ = mean_squared_error
elif isinstance(self.scorer_, str):
if hasattr(sklearn.metrics, '%s_score' % self.scorer_):
self.scorer_ = getattr(sklearn.metrics, '%s_score' %
self.scorer_)
else:
raise KeyError("{0} scorer Doesn't appear to be valid a "
"scikit-learn scorer.".format(self.scorer_))
if not self.scorer_:
raise ValueError('Could not find a scoring metric for clf=%s '
' and predict_method=%s. Manually define scorer'
'.' % (self.clf, self.predict_method))
# If no regressor is passed, use default epochs events
if y is None:
if self.predict_mode == 'cross-validation':
y = self.y_train_
else:
if epochs is not None:
y = epochs.events[:, 2]
else:
raise RuntimeError('y is undefined because '
'predict_mode="mean-prediction" and '
'epochs are missing. You need to '
'explicitly specify y.')
if not np.all(np.unique(y) == np.unique(self.y_train_)):
raise ValueError('Classes (y) passed differ from classes used '
'for training. Please explicitly pass your y '
'for scoring.')
elif isinstance(y, list):
y = np.array(y)
# Clean attributes
for att in ['scores_', 'y_true_']:
if hasattr(self, att):
delattr(self, att)
self.y_true_ = y # to be compared with y_pred for scoring
# Preprocessing for parallelization across training times; to avoid
# overheads, we divide them in large chunks.
n_jobs = min(len(self.y_pred_[0][0]), check_n_jobs(self.n_jobs))
parallel, p_func, n_jobs = parallel_func(_score_slices, n_jobs)
n_estimators = len(self.train_times_['slices'])
n_chunks = min(n_estimators, n_jobs)
chunks = np.array_split(range(len(self.train_times_['slices'])),
n_chunks)
scores = parallel(p_func(
self.y_true_, [self.y_pred_[train] for train in chunk],
self.scorer_, score_mode, self._cv_splits)
for chunk in chunks)
# TODO: np.array scores from initialization JRK
self.scores_ = np.array([score for chunk in scores for score in chunk])
return self.scores_
_warn_once = dict()
def _predict_slices(X, train_times, estimators, cv_splits, predict_mode,
predict_method, n_orig_epochs, test_epochs):
"""Aux function of GeneralizationAcrossTime
Run classifiers predictions loop across time samples.
Parameters
----------
X : ndarray, shape (n_epochs, n_features, n_times)
To-be-fitted data.
estimators : list of array-like, shape (n_times, n_folds)
List of array of scikit-learn classifiers fitted in cross-validation.
cv_splits : list of tuples
List of tuples of train and test array generated from cv.
train_times : list
List of lists of slices selecting the data from X from which the
prediction is generated.
predict_method : str
Specifies prediction method for the estimator.
predict_mode : {'cross-validation', 'mean-prediction'}
Indicates how predictions are achieved with regards to the cross-
validation procedure:
'cross-validation' : estimates a single prediction per sample based
on the unique independent classifier fitted in the cross-
validation.
'mean-prediction' : estimates k predictions per sample, based on
each of the k-fold cross-validation classifiers, and average
these predictions into a single estimate per sample.
Default: 'cross-validation'
n_orig_epochs : int
Original number of predicted epochs before slice definition. Note
that the number of epochs may have been cropped if the cross validation
is not deterministic (e.g. with ShuffleSplit, we may only predict a
subset of epochs).
test_epochs : list of slices
List of slices selecting the tested epochs in the cv.
"""
# Check inputs
n_epochs, _, n_times = X.shape
n_train = len(estimators)
n_test = [len(test_t_idxs) for test_t_idxs in train_times]
# Loop across training times (i.e. estimators)
y_pred = None
for train_t_idx, (estimator_cv, test_t_idxs) in enumerate(
zip(estimators, train_times)):
# Checks whether predict is based on contiguous windows of lengths = 1
# time-sample, ranging across the entire times. In this case, we will
# be able to vectorize the testing times samples.
# Set expected start time if window length == 1
start = np.arange(n_times)
contiguous_start = np.array_equal([sl[0] for sl in test_t_idxs], start)
window_lengths = np.unique([len(sl) for sl in test_t_idxs])
vectorize_times = (window_lengths == 1) and contiguous_start
if vectorize_times:
# In vectorize mode, we avoid iterating over time test time indices
test_t_idxs = [slice(start[0], start[-1] + 1, 1)]
elif _warn_once.get('vectorization', True):
# Only warn if multiple testing time
if len(test_t_idxs) > 1:
warn('Due to a time window with length > 1, unable to '
' vectorize across testing times. This leads to slower '
'predictions compared to the length == 1 case.')
_warn_once['vectorization'] = False
# Iterate over testing times. If vectorize_times: 1 iteration.
for ii, test_t_idx in enumerate(test_t_idxs):
# Vectorizing chan_times features in case of multiple time samples
# given to the estimators.
X_pred = X
if not vectorize_times:
X_pred = X[:, :, test_t_idx].reshape(n_epochs, -1)
if predict_mode == 'mean-prediction':
# Bagging: predict with each fold's estimator and combine
# predictions.
y_pred_ = _predict(X_pred, estimator_cv,
vectorize_times=vectorize_times,
predict_method=predict_method)
# Initialize y_pred now we know its dimensionality
if y_pred is None:
n_dim = y_pred_.shape[-1]
y_pred = _init_ypred(n_train, n_test, n_orig_epochs, n_dim)
if vectorize_times:
# When vectorizing, we predict multiple time points at once
# to gain speed. The output predictions thus correspond to
# different test time indices.
y_pred[train_t_idx][test_t_idx] = y_pred_
else:
# Output predictions in a single test time column
y_pred[train_t_idx][ii] = y_pred_
elif predict_mode == 'cross-validation':
# Predict using the estimator corresponding to each fold
for (_, test), test_epoch, estimator in zip(
cv_splits, test_epochs, estimator_cv):
if test.size == 0: # see issue #2788
continue
y_pred_ = _predict(X_pred[test_epoch], [estimator],
vectorize_times=vectorize_times,
predict_method=predict_method)
# Initialize y_pred now we know its dimensionality
if y_pred is None:
n_dim = y_pred_.shape[-1]
y_pred = _init_ypred(n_train, n_test, n_orig_epochs,
n_dim)
if vectorize_times:
# When vectorizing, we predict multiple time points at
# once to gain speed. The output predictions thus
# correspond to different test_t_idx columns.
y_pred[train_t_idx][test_t_idx, test, ...] = y_pred_
else:
# Output predictions in a single test_t_idx column
y_pred[train_t_idx][ii, test, ...] = y_pred_
return y_pred
def _init_ypred(n_train, n_test, n_orig_epochs, n_dim):
"""Initialize the predictions for each train/test time points.
Parameters
----------
n_train : int
Number of training time point (i.e. estimators)
n_test : list of int
List of number of testing time points for each estimator.
n_orig_epochs : int
Number of epochs passed to gat.predict()
n_dim : int
Number of dimensionality of y_pred. See np.shape(clf.predict(X))
Returns
-------
y_pred : np.array, shape(n_train, n_test, n_orig_epochs, n_dim)
Empty array.
Notes
-----
The ``y_pred`` variable can only be initialized after the first
prediction, because we can't know whether it is a categorical output or a
set of probabilistic estimates. If all train time points have the same
number of testing time points, then y_pred is a matrix. Else it is an array
of arrays.
"""
if len(set(n_test)) == 1:
y_pred = np.empty((n_train, n_test[0], n_orig_epochs, n_dim))
else:
y_pred = np.array([np.empty((this_n, n_orig_epochs, n_dim))
for this_n in n_test])
return y_pred
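# Shape example for _init_ypred (added comment; follows from the code above):
#   n_test == [5, 5, 5] -> y_pred.shape == (n_train, 5, n_orig_epochs, n_dim)
#   n_test == [5, 4, 5] -> y_pred is an array of three blocks with shapes
#                          (5, n_orig_epochs, n_dim), (4, ...), (5, ...)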
def _score_slices(y_true, list_y_pred, scorer, score_mode, cv):
"""Aux function of GeneralizationAcrossTime that loops across chunks of
testing slices.
"""
scores_list = list()
for y_pred in list_y_pred:
scores = list()
for t, this_y_pred in enumerate(y_pred):
if score_mode in ['mean-fold-wise', 'fold-wise']:
# Estimate score within each fold
scores_ = list()
for train, test in cv:
scores_.append(scorer(y_true[test], this_y_pred[test]))
scores_ = np.array(scores_)
# Summarize score as average across folds
if score_mode == 'mean-fold-wise':
scores_ = np.mean(scores_, axis=0)
elif score_mode == 'mean-sample-wise':
# Estimate score across all y_pred without cross-validation.
scores_ = scorer(y_true, this_y_pred)
scores.append(scores_)
scores_list.append(scores)
return scores_list
def _check_epochs_input(epochs, y, picks=None):
"""Aux function of GeneralizationAcrossTime
Format MNE data into scikit-learn X and y.
Parameters
----------
epochs : instance of Epochs
The epochs.
y : ndarray shape (n_epochs) | list shape (n_epochs) | None
To-be-fitted model. If y is None, y == epochs.events.
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
Returns
-------
X : ndarray, shape (n_epochs, n_selected_chans, n_times)
To-be-fitted data.
y : ndarray, shape (n_epochs,)
To-be-fitted model.
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
"""
if y is None:
y = epochs.events[:, 2]
elif isinstance(y, list):
y = np.array(y)
# Convert MNE data into trials x features x time matrix
X = epochs.get_data()
# Pick channels
if picks is None: # just use good data channels
picks = _pick_data_channels(epochs.info, with_ref_meg=False)
if isinstance(picks, (list, np.ndarray)):
picks = np.array(picks, dtype=np.int)
else:
raise ValueError('picks must be a list or a numpy.ndarray of int')
X = X[:, picks, :]
# Check data sets
assert X.shape[0] == y.shape[0]
return X, y, picks
def _fit_slices(clf, x_chunk, y, slices, cv_splits):
"""Aux function of GeneralizationAcrossTime
Fit each classifier.
Parameters
----------
clf : scikit-learn classifier
The classifier object.
x_chunk : ndarray, shape (n_epochs, n_features, n_times)
To-be-fitted data.
y : list | array, shape (n_epochs,)
To-be-fitted model.
slices : list | array, shape (n_training_slice,)
List of training slices, indicating time sample relative to X
cv_splits : list of tuples
List of (train, test) tuples generated from cv.split()
Returns
-------
estimators : list of lists of estimators
List of fitted scikit-learn classifiers corresponding to each training
slice.
"""
from sklearn.base import clone
# Initialize
n_epochs = len(x_chunk)
estimators = list()
# Identify the time samples of x_chunk corresponding to X
values = np.unique([val for sl in slices for val in sl])
# Loop across time slices
for t_slice in slices:
# Translate absolute time samples into time sample relative to x_chunk
t_slice = np.array([np.where(ii == values)[0][0] for ii in t_slice])
# Select slice
X = x_chunk[..., t_slice]
# Reshape data matrix to flatten features if multiple time samples.
X = X.reshape(n_epochs, np.prod(X.shape[1:]))
# Loop across folds
estimators_ = list()
for fold, (train, test) in enumerate(cv_splits):
# Fit classifier
clf_ = clone(clf)
clf_.fit(X[train, :], y[train])
estimators_.append(clf_)
# Store classifier
estimators.append(estimators_)
return estimators
def _sliding_window(times, window, sfreq):
"""Aux function of GeneralizationAcrossTime
Define the slices on which to train each classifier. The user either defines
the time slices manually in window['slices'] or passes optional parameters
to set them from window['start'], window['stop'], window['step'] and
window['length'].
Parameters
----------
times : ndarray, shape (n_times,)
Array of times from MNE epochs.
window : dict keys: ('start', 'stop', 'step', 'length')
Either train or test times.
Returns
-------
window : dict
Dictionary to set training and testing times.
See Also
--------
GeneralizationAcrossTime
"""
import copy
window = _DecodingTime(copy.deepcopy(window))
# Default values
time_slices = window.get('slices', None)
# If the user hasn't manually defined the time slices, we'll define them
# with ``start``, ``stop``, ``step`` and ``length`` parameters.
if time_slices is None:
window['start'] = window.get('start', times[0])
window['stop'] = window.get('stop', times[-1])
window['step'] = window.get('step', 1. / sfreq)
window['length'] = window.get('length', 1. / sfreq)
if not (times[0] <= window['start'] <= times[-1]):
raise ValueError(
'start (%.2f s) outside time range [%.2f, %.2f].' % (
window['start'], times[0], times[-1]))
if not (times[0] <= window['stop'] <= times[-1]):
raise ValueError(
'stop (%.2f s) outside time range [%.2f, %.2f].' % (
window['stop'], times[0], times[-1]))
if window['step'] < 1. / sfreq:
raise ValueError('step must be >= 1 / sampling_frequency')
if window['length'] < 1. / sfreq:
raise ValueError('length must be >= 1 / sampling_frequency')
if window['length'] > np.ptp(times):
raise ValueError('length must be <= time range')
# Convert seconds to index
def find_t_idx(t): # find closest time point
return np.argmin(np.abs(np.asarray(times) - t))
start = find_t_idx(window['start'])
stop = find_t_idx(window['stop'])
step = int(round(window['step'] * sfreq))
length = int(round(window['length'] * sfreq))
# For each training slice, give time samples to be included
time_slices = [range(start, start + length)]
while (time_slices[-1][0] + step) <= (stop - length + 1):
start = time_slices[-1][0] + step
time_slices.append(range(start, start + length))
window['slices'] = time_slices
window['times'] = _set_window_time(window['slices'], times)
return window
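# Worked example for _sliding_window (added comment, assuming the defaults):
# with times = [0.00, 0.01, ..., 0.05] (6 samples) and sfreq = 100., an empty
# window dict gives step = length = 1 sample, so
#   window['slices'] == [range(0, 1), range(1, 2), ..., range(5, 6)]
# i.e. one single-sample training window per time point, and window['times']
# equals the original time points.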
def _set_window_time(slices, times):
"""Aux function to define time as the last training time point"""
t_idx_ = [t[-1] for t in slices]
return times[t_idx_]
def _predict(X, estimators, vectorize_times, predict_method):
"""Aux function of GeneralizationAcrossTime
Compute predictions with each classifier. If multiple classifiers are passed,
their predictions are averaged (or combined by majority vote) so as to return
a single prediction per sample.
Parameters
----------
estimators : ndarray, shape (n_folds,) | shape (1,)
Array of scikit-learn classifiers to predict data.
X : ndarray, shape (n_epochs, n_features, n_times)
To-be-predicted data
vectorize_times : bool
If True, X can be vectorized to predict all times points at once
predict_method : str
Name of the method used to make predictions from the estimator. For
example, both `predict_proba` and `predict` are supported for
sklearn.linear_model.LogisticRegression. Note that the scorer must be
adapted to the prediction outputs of the method. Defaults to 'predict'.
Returns
-------
y_pred : ndarray, shape (n_epochs, m_prediction_dimensions)
Classifier's prediction for each trial.
"""
from scipy import stats
from sklearn.base import is_classifier
# Initialize results:
orig_shape = X.shape
n_epochs = orig_shape[0]
n_times = orig_shape[-1]
n_clf = len(estimators)
# in simple case, we are predicting each time sample as if it
# was a different epoch
if vectorize_times: # treat times as trials for optimization
X = np.hstack(X).T # XXX JRK: still 17% of cpu time
n_epochs_tmp = len(X)
# Compute prediction for each sub-estimator (i.e. per fold)
# if independent, estimators = all folds
for fold, clf in enumerate(estimators):
_y_pred = getattr(clf, predict_method)(X)
# See inconsistency in dimensionality: scikit-learn/scikit-learn#5058
if _y_pred.ndim == 1:
_y_pred = _y_pred[:, None]
# initialize predict_results array
if fold == 0:
predict_size = _y_pred.shape[1]
y_pred = np.ones((n_epochs_tmp, predict_size, n_clf))
y_pred[:, :, fold] = _y_pred
# Bagging: Collapse y_pred across folds if necessary (i.e. if independent)
# XXX need API to identify how multiple predictions can be combined?
if fold > 0:
if is_classifier(clf) and (predict_method == 'predict'):
y_pred, _ = stats.mode(y_pred, axis=2)
else:
y_pred = np.mean(y_pred, axis=2, keepdims=True)
y_pred = y_pred[:, :, 0]
# Format shape
if vectorize_times:
shape = [n_epochs, n_times, y_pred.shape[-1]]
y_pred = y_pred.reshape(shape).transpose([1, 0, 2])
return y_pred
class GeneralizationAcrossTime(_GeneralizationAcrossTime):
"""Generalize across time and conditions
Creates an estimator object used to 1) fit a series of classifiers on
multidimensional time-resolved data, and 2) test the ability of each
classifier to generalize across other time samples, as in [1]_.
Parameters
----------
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
cv : int | object
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
scikit-learn.model_selection module for the list of possible objects.
If clf is a classifier, defaults to StratifiedKFold(n_folds=5), else
defaults to KFold(n_folds=5).
clf : object | None
An estimator compliant with the scikit-learn API (fit & predict).
If None the classifier will be a standard pipeline including
StandardScaler and LogisticRegression with default parameters.
train_times : dict | None
A dictionary to configure the training times:
* ``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
* ``start`` : float
Time at which to start decoding (in seconds).
Defaults to min(epochs.times).
* ``stop`` : float
Maximal time at which to stop decoding (in seconds).
Defaults to max(times).
* ``step`` : float
Duration separating the start of subsequent classifiers (in
seconds). Defaults to one time sample.
* ``length`` : float
Duration of each classifier (in seconds).
Defaults to one time sample.
If None, empty dict.
test_times : 'diagonal' | dict | None, optional
Configures the testing times.
If set to 'diagonal', predictions are made at the time at which
each classifier is trained.
If set to None, predictions are made at all time points.
If set to dict, the dict should contain ``slices`` or be constructed in
a similar way to train_times:
``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
If None, empty dict.
predict_method : str
Name of the method used to make predictions from the estimator. For
example, both `predict_proba` and `predict` are supported for
sklearn.linear_model.LogisticRegression. Note that the scorer must be
adapted to the prediction outputs of the method. Defaults to 'predict'.
predict_mode : {'cross-validation', 'mean-prediction'}
Indicates how predictions are achieved with regards to the cross-
validation procedure:
* ``cross-validation`` : estimates a single prediction per sample
based on the unique independent classifier fitted in the
cross-validation.
* ``mean-prediction`` : estimates k predictions per sample, based
on each of the k-fold cross-validation classifiers, and average
these predictions into a single estimate per sample.
Defaults to 'cross-validation'.
scorer : object | None | str
scikit-learn Scorer instance or str type indicating the name of the
scorer such as ``accuracy``, ``roc_auc``. If None, set to ``accuracy``.
score_mode : {'fold-wise', 'mean-fold-wise', 'mean-sample-wise'}
Determines how the scorer is estimated:
* ``fold-wise`` : returns the score obtained in each fold.
* ``mean-fold-wise`` : returns the average of the fold-wise scores.
* ``mean-sample-wise`` : returns the score estimated across all
y_pred independently of the cross-validation. This method is
faster than ``mean-fold-wise`` but less conventional, use at
your own risk.
Defaults to 'mean-fold-wise'.
n_jobs : int
Number of jobs to run in parallel. Defaults to 1.
Attributes
----------
``picks_`` : array-like of int | None
The channels indices to include.
ch_names : list, array-like, shape (n_channels,)
Names of the channels used for training.
``y_train_`` : list | ndarray, shape (n_samples,)
The categories used for training.
``train_times_`` : dict
A dictionary that configures the training times:
* ``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
* ``times`` : ndarray, shape (n_clfs,)
The training times (in seconds).
``test_times_`` : dict
A dictionary that configures the testing times for each training time:
``slices`` : ndarray, shape (n_clfs, n_testing_times)
Array of time slices (in indices) used for each classifier.
``times`` : ndarray, shape (n_clfs, n_testing_times)
The testing times (in seconds) for each training time.
``cv_`` : CrossValidation object
The actual CrossValidation input depending on y.
``estimators_`` : list of list of scikit-learn.base.BaseEstimator subclasses.
The estimators for each time point and each fold.
``y_pred_`` : list of lists of arrays of floats, shape (n_train_times, n_test_times, n_epochs, n_prediction_dims)
The single-trial predictions estimated by self.predict() at each
        training time and each testing time. The number of testing times per
        training time need not be regular; when it is regular,
        ``np.shape(y_pred_) == (n_train_times, n_test_times, n_epochs)``.
``y_true_`` : list | ndarray, shape (n_samples,)
The categories used for scoring ``y_pred_``.
``scorer_`` : object
scikit-learn Scorer instance.
``scores_`` : list of lists of float
The scores estimated by ``self.scorer_`` at each training time and each
        testing time (e.g. mean accuracy of self.predict(X)). The number of
        testing times per training time need not be regular; when it is regular,
        ``np.shape(scores_) == (n_train_times, n_test_times)``.
See Also
--------
TimeDecoding
References
----------
.. [1] Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
and Stanislas Dehaene, "Two distinct dynamic modes subtend the
detection of unexpected sounds", PLoS ONE, 2014
DOI: 10.1371/journal.pone.0085791
.. versionadded:: 0.9.0
""" # noqa
def __init__(self, picks=None, cv=5, clf=None, train_times=None,
test_times=None, predict_method='predict',
predict_mode='cross-validation', scorer=None,
score_mode='mean-fold-wise', n_jobs=1):
super(GeneralizationAcrossTime, self).__init__(
picks=picks, cv=cv, clf=clf, train_times=train_times,
test_times=test_times, predict_method=predict_method,
predict_mode=predict_mode, scorer=scorer, score_mode=score_mode,
n_jobs=n_jobs)
def __repr__(self):
s = ''
if hasattr(self, "estimators_"):
s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
self.train_times_.get('start', np.nan),
self.train_times_.get('stop', np.nan))
else:
s += 'no fit'
if hasattr(self, 'y_pred_'):
s += (", predicted %d epochs" % len(self.y_pred_[0][0]))
else:
s += ", no prediction"
if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
s += ',\n '
else:
s += ', '
if hasattr(self, 'scores_'):
s += "scored"
if callable(self.scorer_):
s += " (%s)" % (self.scorer_.__name__)
else:
s += "no score"
return "<GAT | %s>" % s
def plot(self, title=None, vmin=None, vmax=None, tlim=None, ax=None,
cmap='RdBu_r', show=True, colorbar=True,
xlabel=True, ylabel=True):
"""Plotting function of GeneralizationAcrossTime object
Plot the score of each classifier at each tested time window.
Parameters
----------
title : str | None
Figure title.
vmin : float | None
Min color value for scores. If None, sets to min(``gat.scores_``).
vmax : float | None
Max color value for scores. If None, sets to max(``gat.scores_``).
tlim : ndarray, (train_min, test_max) | None
The time limits used for plotting.
ax : object | None
Plot pointer. If None, generate new figure.
cmap : str | cmap object
The color map to be used. Defaults to ``'RdBu_r'``.
show : bool
If True, the figure will be shown. Defaults to True.
colorbar : bool
If True, the colorbar of the figure is displayed. Defaults to True.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
return plot_gat_matrix(self, title=title, vmin=vmin, vmax=vmax,
tlim=tlim, ax=ax, cmap=cmap, show=show,
colorbar=colorbar, xlabel=xlabel, ylabel=ylabel)
def plot_diagonal(self, title=None, xmin=None, xmax=None, ymin=None,
ymax=None, ax=None, show=True, color=None,
xlabel=True, ylabel=True, legend=True, chance=True,
label='Classif. score'):
"""Plotting function of GeneralizationAcrossTime object
Plot each classifier score trained and tested at identical time
windows.
Parameters
----------
title : str | None
Figure title.
xmin : float | None, optional
Min time value.
xmax : float | None, optional
Max time value.
ymin : float | None, optional
Min score value. If None, sets to min(scores).
ymax : float | None, optional
Max score value. If None, sets to max(scores).
ax : object | None
            Instance of matplotlib.axes.Axes. If None, a new figure is generated.
show : bool
If True, the figure will be shown. Defaults to True.
color : str
Score line color.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
        chance : bool | float
            Plot chance level. If True, the chance level is estimated from the
            type of scorer. Defaults to True.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
return plot_gat_times(self, train_time='diagonal', title=title,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax, ax=ax, show=show,
color=color, xlabel=xlabel, ylabel=ylabel,
legend=legend, chance=chance, label=label)
def plot_times(self, train_time, title=None, xmin=None, xmax=None,
ymin=None, ymax=None, ax=None, show=True, color=None,
xlabel=True, ylabel=True, legend=True, chance=True,
label='Classif. score'):
"""Plotting function of GeneralizationAcrossTime object
Plot the scores of the classifier trained at specific training time(s).
Parameters
----------
train_time : float | list or array of float
Plots scores of the classifier trained at train_time.
title : str | None
Figure title.
xmin : float | None, optional
Min time value.
xmax : float | None, optional
Max time value.
ymin : float | None, optional
Min score value. If None, sets to min(scores).
ymax : float | None, optional
Max score value. If None, sets to max(scores).
ax : object | None
            Instance of matplotlib.axes.Axes. If None, a new figure is generated.
show : bool
If True, the figure will be shown. Defaults to True.
color : str or list of str
Score line color(s).
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
        chance : bool | float
            Plot chance level. If True, the chance level is estimated from the
            type of scorer. Defaults to True.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
        if np.array(train_time).dtype is not np.dtype('float'):
raise ValueError('train_time must be float | list or array of '
'floats. Got %s.' % type(train_time))
return plot_gat_times(self, train_time=train_time, title=title,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax, ax=ax, show=show,
color=color, xlabel=xlabel, ylabel=ylabel,
legend=legend, chance=chance, label=label)
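# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  Assuming an MNE ``Epochs`` object named ``epochs`` containing two
# event types, a typical generalization-across-time analysis could look like:
#
#     gat = GeneralizationAcrossTime(cv=5, predict_mode='cross-validation',
#                                    n_jobs=1)
#     gat.fit(epochs)        # one classifier per training time sample
#     gat.score(epochs)      # test every classifier at every testing time
#     gat.plot()             # training-time x testing-time score matrix
#     gat.plot_diagonal()    # scores where training time == testing time
# ---------------------------------------------------------------------------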
class TimeDecoding(_GeneralizationAcrossTime):
"""Train and test a series of classifiers at each time point to obtain a
score across time.
Parameters
----------
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
cv : int | object
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
scikit-learn.model_selection module for the list of possible objects.
If clf is a classifier, defaults to StratifiedKFold(n_folds=5), else
defaults to KFold(n_folds=5).
clf : object | None
An estimator compliant with the scikit-learn API (fit & predict).
If None the classifier will be a standard pipeline including
StandardScaler and a Logistic Regression with default parameters.
times : dict | None
A dictionary to configure the training times:
``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
``start`` : float
Time at which to start decoding (in seconds). By default,
min(epochs.times).
``stop`` : float
Maximal time at which to stop decoding (in seconds). By
default, max(times).
``step`` : float
Duration separating the start of subsequent classifiers (in
seconds). By default, equals one time sample.
``length`` : float
Duration of each classifier (in seconds). By default, equals
one time sample.
If None, empty dict.
predict_method : str
Name of the method used to make predictions from the estimator. For
example, both `predict_proba` and `predict` are supported for
sklearn.linear_model.LogisticRegression. Note that the scorer must be
adapted to the prediction outputs of the method. Defaults to 'predict'.
predict_mode : {'cross-validation', 'mean-prediction'}
Indicates how predictions are achieved with regards to the cross-
validation procedure:
* ``cross-validation`` : estimates a single prediction per sample
based on the unique independent classifier fitted in the
cross-validation.
* ``mean-prediction`` : estimates k predictions per sample, based
          on each of the k-fold cross-validation classifiers, and averages
these predictions into a single estimate per sample.
Defaults to 'cross-validation'.
scorer : object | None | str
scikit-learn Scorer instance or str type indicating the name of the
scorer such as ``accuracy``, ``roc_auc``. If None, set to ``accuracy``.
score_mode : {'fold-wise', 'mean-fold-wise', 'mean-sample-wise'}
Determines how the scorer is estimated:
* ``fold-wise`` : returns the score obtained in each fold.
* ``mean-fold-wise`` : returns the average of the fold-wise scores.
        * ``mean-sample-wise`` : returns the score estimated across all
y_pred independently of the cross-validation. This method is
faster than ``mean-fold-wise`` but less conventional, use at
your own risk.
Defaults to 'mean-fold-wise'.
n_jobs : int
Number of jobs to run in parallel. Defaults to 1.
Attributes
----------
``picks_`` : array-like of int | None
The channels indices to include.
ch_names : list, array-like, shape (n_channels,)
Names of the channels used for training.
``y_train_`` : ndarray, shape (n_samples,)
The categories used for training.
``times_`` : dict
A dictionary that configures the training times:
* ``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
* ``times`` : ndarray, shape (n_clfs,)
The training times (in seconds).
``cv_`` : CrossValidation object
The actual CrossValidation input depending on y.
``estimators_`` : list of list of scikit-learn.base.BaseEstimator subclasses.
The estimators for each time point and each fold.
``y_pred_`` : ndarray, shape (n_times, n_epochs, n_prediction_dims)
Class labels for samples in X.
``y_true_`` : list | ndarray, shape (n_samples,)
The categories used for scoring ``y_pred_``.
``scorer_`` : object
scikit-learn Scorer instance.
``scores_`` : list of float, shape (n_times,)
        The scores (e.g. mean accuracy of self.predict(X) with respect to y).
See Also
--------
GeneralizationAcrossTime
Notes
-----
The function is equivalent to the diagonal of GeneralizationAcrossTime()
.. versionadded:: 0.10
""" # noqa
def __init__(self, picks=None, cv=5, clf=None, times=None,
predict_method='predict', predict_mode='cross-validation',
scorer=None, score_mode='mean-fold-wise', n_jobs=1):
super(TimeDecoding, self).__init__(picks=picks, cv=cv, clf=clf,
train_times=times,
test_times='diagonal',
predict_method=predict_method,
predict_mode=predict_mode,
scorer=scorer,
score_mode=score_mode,
n_jobs=n_jobs)
self._clean_times()
def __repr__(self):
s = ''
if hasattr(self, "estimators_"):
s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
self.times_.get('start', np.nan),
self.times_.get('stop', np.nan))
else:
s += 'no fit'
if hasattr(self, 'y_pred_'):
s += (", predicted %d epochs" % len(self.y_pred_[0]))
else:
s += ", no prediction"
if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
s += ',\n '
else:
s += ', '
if hasattr(self, 'scores_'):
s += "scored"
if callable(self.scorer_):
s += " (%s)" % (self.scorer_.__name__)
else:
s += "no score"
return "<TimeDecoding | %s>" % s
def fit(self, epochs, y=None):
"""Train a classifier on each specified time slice.
.. note::
This function sets the ``picks_``, ``ch_names``, ``cv_``,
            ``y_train_``, ``train_times_`` and ``estimators_`` attributes.
Parameters
----------
epochs : instance of Epochs
The epochs.
y : list or ndarray of int, shape (n_samples,) or None, optional
To-be-fitted model values. If None, y = epochs.events[:, 2].
Returns
-------
self : TimeDecoding
Returns fitted TimeDecoding object.
Notes
-----
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
self._prep_times()
super(TimeDecoding, self).fit(epochs, y=y)
self._clean_times()
return self
def predict(self, epochs):
"""Test each classifier on each specified testing time slice.
.. note::
This function sets the ``y_pred_`` and ``test_times_`` attributes.
Parameters
----------
epochs : instance of Epochs
The epochs. Can be similar to fitted epochs or not. See
predict_mode parameter.
Returns
-------
y_pred : list of lists of arrays of floats, shape (n_times, n_epochs, n_prediction_dims)
The single-trial predictions at each time sample.
""" # noqa
self._prep_times()
super(TimeDecoding, self).predict(epochs)
self._clean_times()
return self.y_pred_
def score(self, epochs=None, y=None):
"""Score Epochs
Estimate scores across trials by comparing the prediction estimated for
each trial to its true value.
        Calls ``predict()`` if it has not already been called.
.. note::
The function updates the ``scorer_``, ``scores_``, and
``y_true_`` attributes.
.. note::
If ``predict_mode`` is 'mean-prediction', ``score_mode`` is
automatically set to 'mean-sample-wise'.
Parameters
----------
epochs : instance of Epochs | None, optional
The epochs. Can be similar to fitted epochs or not.
If None, it needs to rely on the predictions ``y_pred_``
generated with ``predict()``.
y : list | ndarray, shape (n_epochs,) | None, optional
True values to be compared with the predictions ``y_pred_``
generated with ``predict()`` via ``scorer_``.
If None and ``predict_mode``=='cross-validation' y = ``y_train_``.
Returns
-------
scores : list of float, shape (n_times,)
The scores estimated by ``scorer_`` at each time sample (e.g. mean
accuracy of ``predict(X)``).
"""
if epochs is not None:
self.predict(epochs)
else:
if not hasattr(self, 'y_pred_'):
raise RuntimeError('Please predict() epochs first or pass '
'epochs to score()')
self._prep_times()
super(TimeDecoding, self).score(epochs=None, y=y)
self._clean_times()
return self.scores_
def plot(self, title=None, xmin=None, xmax=None, ymin=None, ymax=None,
ax=None, show=True, color=None, xlabel=True, ylabel=True,
legend=True, chance=True, label='Classif. score'):
"""Plotting function
        Plot the classification score at each time sample. If several
        cross-validation classifiers provide predictions for a sample, these
        predictions are averaged so that a single score is reported per time
        sample.
Parameters
----------
title : str | None
Figure title.
xmin : float | None, optional,
Min time value.
xmax : float | None, optional,
Max time value.
ymin : float
Min score value. Defaults to 0.
ymax : float
Max score value. Defaults to 1.
ax : object | None
            Instance of matplotlib.axes.Axes. If None, a new figure is generated.
show : bool
If True, the figure will be shown. Defaults to True.
color : str
Score line color. Defaults to 'steelblue'.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
        chance : bool | float
            Plot chance level. If True, the chance level is estimated from the
            type of scorer. Defaults to True.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
# XXX JRK: need cleanup in viz
self._prep_times()
fig = plot_gat_times(self, train_time='diagonal', title=title,
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, ax=ax,
show=show, color=color, xlabel=xlabel,
ylabel=ylabel, legend=legend, chance=chance,
label=label)
self._clean_times()
return fig
def _prep_times(self):
"""Auxiliary function to allow compatibility with GAT"""
self.test_times = 'diagonal'
if hasattr(self, 'times'):
self.train_times = self.times
if hasattr(self, 'times_'):
self.train_times_ = self.times_
self.test_times_ = _DecodingTime()
self.test_times_['slices'] = [[slic] for slic in
self.train_times_['slices']]
self.test_times_['times'] = [[tim] for tim in
self.train_times_['times']]
if hasattr(self, 'scores_'):
self.scores_ = [[score] for score in self.scores_]
if hasattr(self, 'y_pred_'):
self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]
def _clean_times(self):
"""Auxiliary function to allow compatibility with GAT"""
if hasattr(self, 'train_times'):
self.times = self.train_times
if hasattr(self, 'train_times_'):
self.times_ = self.train_times_
for attr in ['test_times', 'train_times',
'test_times_', 'train_times_']:
if hasattr(self, attr):
delattr(self, attr)
if hasattr(self, 'y_pred_'):
self.y_pred_ = [y_pred[0] for y_pred in self.y_pred_]
if hasattr(self, 'scores_'):
self.scores_ = [score[0] for score in self.scores_]
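# Hedged usage sketch (added for illustration; not part of the original
# module).  With the same hypothetical ``epochs`` object as in the sketch
# above, decoding over time (the diagonal of the generalization matrix) is:
#
#     td = TimeDecoding(cv=5, predict_mode='cross-validation', n_jobs=1)
#     td.fit(epochs)
#     td.score(epochs)
#     td.plot(chance=True)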
def _chunk_data(X, slices):
"""Smart chunking to avoid memory overload.
The parallelization is performed across time samples. To avoid overheads,
    the X data is split into large chunks of different time sizes. To
    avoid duplicating the memory load for each job, we only pass the time
samples that are required by each job. The indices of the training times
must be adjusted accordingly.
"""
# from object array to list
slices = [sl for sl in slices if len(sl)]
selected_times = np.hstack([np.ravel(sl) for sl in slices])
start = np.min(selected_times)
stop = np.max(selected_times) + 1
slices_chunk = [sl - start for sl in slices]
X_chunk = X[:, :, start:stop]
return X_chunk, slices_chunk
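if __name__ == '__main__':
    # Hedged, illustrative demo (added; not part of the original module) of how
    # ``_chunk_data`` trims the time axis and re-indexes the slices handed to
    # each parallel job.  The toy shapes below are assumptions, and the demo
    # only runs if the module's own imports are available.
    _X_demo = np.random.RandomState(0).randn(4, 2, 10)   # epochs x channels x times
    _demo_slices = [np.array([2, 3]), np.array([4, 5])]  # training-time indices
    _X_chunk, _chunk_slices = _chunk_data(_X_demo, _demo_slices)
    print(_X_chunk.shape)    # (4, 2, 4): only time samples 2..5 are passed on
    print(_chunk_slices)     # [array([0, 1]), array([2, 3])]: slices re-indexed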
|
bsd-3-clause
|
rhyswhitley/phendulum
|
src/model_plotting.py
|
1
|
7543
|
#!/usr/bin/env python
import datetime, time, re
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import gridspec
# load own modules
import spring_dynamics as _sd
__author__ = 'Rhys Whitley, Douglas Kelley, Martin De Kauwe'
__email__ = '[email protected]'
__created__ = datetime.datetime(2015,1,14)
__modified__ = time.strftime("%c")
__version__ = '1.0'
__status__ = 'prototype'
# Plot functions
class model_plotting(object):
def __init__(self, fig_path):
"""
        Initialise the class with all necessary plotting requirements.
        Variables are set and stored here for easy access and control
        over all plotting.
"""
self.xlabel = "SWC_smooth"
self.ylabel = "NDVI_grass"
self.fpath = fig_path
self.outcol = "#000000"
self.col = ['#DC143C','#4169E1','#3CB371']
self.lab = ['ensemble','in','out']
def plot_allSite_forcing(self, f_mod, data_list, par_list):
"""
        Creates a PDF whose pages contain the results that describe the
environmental forcing at each site, as well as the forcing based on
out-of-site sampling and as an ensemble of all sites.
"""
with PdfPages(self.fpath+'environ_forcing.pdf') as pdf:
# plot all site points as a reference to the ensemble and out-of-sampling fits
# plot the optimised site-specific forcing
for data_i, par_i in zip(data_list, par_list):
[ self._plot_data(d) for d in data_list ]
self._plot_forcing( data_i, par_i, f_mod, pdf )
def _plot_data(self, data):
""" Wrapper on a normal plot for preset data illustration """
plt.plot( data[self.xlabel], data[self.ylabel], 'x', color=self.outcol, alpha=0.3 )
def _plot_models(self, xs, fxs, color, label ):
""" Wrapper on a normal plot for preset model illustration """
plt.plot( xs, fxs, linestyle='-', lw=5, color=color, label=label, alpha=0.5 )
def _plot_forcing(self, data, k_var, f_mod, pobj):
"""
Plots the three different expressions of environmental forcing based on
the three types of sampling used in the analysis
"""
# Create vectors for model fits
xs = np.arange(0,0.5,1e-3)
site_title = set(data["Site"]).pop()
plt.plot( data[self.xlabel], data[self.ylabel], 'o', color='black', ms=8 )
[ self._plot_models( xs, f_mod(ks, xs), color=self.col[i], label=self.lab[i] ) for i, ks in enumerate(np.array(k_var)) ]
plt.xlabel(r'$\theta_{s 10cm}$', fontsize=18)
plt.ylabel(r'NDVI$_{grass}$')
plt.axis([0,0.32,-0.05,0.6])
plt.legend(loc=2)
plt.title(site_title)
pobj.savefig()
plt.close()
def create_title(self, string):
""" Splits the site label into two words based on capital letters"""
return re.sub(r"(\w)([A-Z])", r"\1 \2", string)
def plot_data_manipulation(self, data_list):
"""
        Creates PDFs that illustrate the data manipulation at each site:
        the smoothed soil-water content time-series and the NDVI time-series
        with its first- and second-derivative inflexion points.
"""
with PdfPages(self.fpath+'soilwater.pdf') as pdf:
[ self._plot_moving_average(d, pdf) for d in data_list ]
with PdfPages(self.fpath+'ndvi_timeseries.pdf') as pdf:
[ self._plot_inflexion_points(d, pdf) for d in data_list ]
def _plot_moving_average(self, data, pobj):
"""
        Plots the raw soil water content time-series alongside its smoothed
        (Gaussian-filtered) version, and the residual between them, to
        elucidate major turning points that align with the NDVI time-series.
"""
xraw = data["SWC10"]
xmes = data["SWC_smooth"]
site_title = set(data["Site"]).pop()
fig, (ax1,ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot( xraw, '-', color='pink' )
ax1.plot( xmes, linestyle='-', color='red', lw=2)
ax2.plot( xraw-xmes, '.', color='pink' )
ax1.set_ylabel(r'$\theta_{s}$', fontsize=18)
ax2.set_ylabel(r'$\sigma$', fontsize=18)
ax2.set_xlabel('Days since ' + data['DT'][0])
ax1.set_title(site_title)
pobj.savefig()
plt.close()
def _plot_inflexion_points(self, data, pobj):
yraw = data["NDVI250X"]
ymes = data["NDVI_grass"]
yd1 = data["dy1/dt1"]
yd2 = data["dy2/dt2"]
site_title = set(data["Site"]).pop()
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(yraw, '-', color='lightblue', label='Total')
ax1.plot(ymes, '-', color='blue', lw=2)
ax2.plot(yd1, color='red', lw=2, label="$dx/dt$" )
ax2.plot(yd2, color='blue', lw=2, label="$d^2x/dt^2$" )
ax1.set_ylabel('NDVI signal')
ax2.set_ylabel(r'$x_{t+1}-x_{t}$', fontsize=16)
ax2.set_xlabel('Days since ' + data['DT'][0])
ax2.axis([1,len(ymes),-6e-3,6e-3])
ax2.legend(loc=1)
ax1.set_title(site_title)
pobj.savefig()
plt.close()
#================================================================================
# Pendulum
#================================================================================
def plot_allSite_pendulum(self, data_list, par_list, f_mod):
"""
        Creates a PDF whose pages compare, for each site, the observed NDVI
        signal with the pendulum (spring) model predictions obtained from the
        ensemble, in-site and out-of-site parameter samplings.
"""
with PdfPages(self.fpath+'phendulum.pdf') as pdf:
# plot all site points as a reference to the ensemble and out-of-sampling fits
# plot the optimised site-specific forcing
for data, par in zip(data_list, par_list):
self._plot_pendulum( data, par, f_mod, pdf )
def _plot_pendulum(self, data, kvar, f_mod, pobj):
# now assign the optimised coefficients to the pendulum and calculate the motion
y_grass = data["NDVI_grass"]
x_mes = data["SWC10"]
# based on the number of samplings get the prediction of motion
springs = [ _sd.spring(k, x_mes, f_mod) for k in np.array(kvar) ]
y_mod = [ p.calc_dynamics()['x'] for p in springs ]
force_mod = [ f.calc_dynamics()['Fe'] for f in springs ]
accel_mod = [ a.calc_dynamics()['a'] for a in springs ]
veloc_mod = [ v.calc_dynamics()['v'] for v in springs ]
site_title = set(data["Site"]).pop()
fig = plt.figure(figsize=(10,7))
# setup plotting grid
gs = gridspec.GridSpec(2, 1, height_ratios=[2,1])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1], sharex=ax1)
# remove xlabels on the second and third plots
plt.setp(ax1.get_xticklabels(), visible=False)
# plot data
ax1.plot( y_grass, color='black', lw=2, label="MODIS" )
[ ax1.plot( y_mod[i], lw=2, alpha=0.8, color=self.col[i], label=self.lab[i] ) for i, ks in enumerate(y_mod) ]
#ax1.plot( y_mod, color='red', lw=2, label="Pendulum" )
ax2.plot( x_mes, color=self.col[1], alpha=0.8, lw=1.5 )
# labels
ax1.set_ylabel( r"NDVI", fontsize=14 )
ax2.set_ylabel( r"$\theta_{s10cm}$", fontsize=18 )
# legends
ax1.legend(loc=1)
ax1.set_title(site_title, size=20)
gs.tight_layout(fig, rect=[0, 0, 1, 0.97])
pobj.savefig()
plt.close()
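if __name__ == '__main__':
    # Hedged illustration (added; not part of the original module).  Assuming
    # the module imports cleanly (it needs the local ``spring_dynamics``
    # module), the title helper splits CamelCase site labels into words.
    _mp = model_plotting(fig_path='./')
    print(_mp.create_title('AliceSprings'))   # -> 'Alice Springs'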
|
apache-2.0
|
ben-hopps/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/projections/__init__.py
|
69
|
2179
|
from geo import AitoffAxes, HammerAxes, LambertAxes
from polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
"""
Manages the set of projections available to the system.
"""
def __init__(self):
self._all_projection_types = {}
def register(self, *projections):
"""
Register a new set of projection(s).
"""
for projection in projections:
name = projection.name
self._all_projection_types[name] = projection
def get_projection_class(self, name):
"""
Get a projection class from its *name*.
"""
return self._all_projection_types[name]
def get_projection_names(self):
"""
Get a list of the names of all projections currently
registered.
"""
names = self._all_projection_types.keys()
names.sort()
return names
projection_registry = ProjectionRegistry()
projection_registry.register(
axes.Axes,
PolarAxes,
AitoffAxes,
HammerAxes,
LambertAxes)
def register_projection(cls):
projection_registry.register(cls)
def get_projection_class(projection=None):
"""
Get a projection class from its name.
If *projection* is None, a standard rectilinear projection is
returned.
"""
if projection is None:
projection = 'rectilinear'
try:
return projection_registry.get_projection_class(projection)
except KeyError:
raise ValueError("Unknown projection '%s'" % projection)
def projection_factory(projection, figure, rect, **kwargs):
"""
Get a new projection instance.
*projection* is a projection name.
*figure* is a figure to add the axes to.
*rect* is a :class:`~matplotlib.transforms.Bbox` object specifying
the location of the axes within the figure.
Any other kwargs are passed along to the specific projection
constructor being used.
"""
return get_projection_class(projection)(figure, rect, **kwargs)
def get_projection_names():
"""
Get a list of acceptable projection names.
"""
return projection_registry.get_projection_names()
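if __name__ == '__main__':
    # Hedged illustration (added; not part of the original module).  Because of
    # the implicit relative imports above, this only runs from inside the
    # ``projections`` package directory under Python 2.
    print(get_projection_names())         # ['aitoff', 'hammer', 'lambert', 'polar', 'rectilinear']
    print(get_projection_class('polar'))  # the PolarAxes class
    print(get_projection_class())         # the default rectilinear Axes class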
|
agpl-3.0
|
anurag313/scikit-learn
|
sklearn/svm/tests/test_sparse.py
|
70
|
12992
|
from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits because iris, blobs and make_classification
    # didn't show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
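if __name__ == '__main__':
    # Hedged convenience entry point (added; not part of the original test
    # module): run a couple of the dense-vs-sparse parity checks directly,
    # assuming the scikit-learn/nose versions imported above are available.
    test_linearsvc()
    test_svc_iris()
    print('dense vs. sparse parity checks passed')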
|
bsd-3-clause
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/pandas/computation/ops.py
|
9
|
15234
|
"""Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.core.base import StringMixin
from pandas.computation.common import _ensure_decoded, _result_type_many
from pandas.computation.scope import _DEFAULT_GLOBALS
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
tname = text_type(name)
self.is_local = (tname.startswith(_LOCAL_TAG) or
tname in _DEFAULT_GLOBALS)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return com.pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def isscalar(self):
return np.isscalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return com.pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
"""Hold an operator of arbitrary arity
"""
def __init__(self, op, operands, *args, **kwargs):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = kwargs.get('encoding', None)
def __iter__(self):
return iter(self.operands)
def __unicode__(self):
"""Print a generic n-ary operator and its operands using infix
notation"""
# recurse over the operands
parened = ('({0})'.format(com.pprint_thing(opr))
for opr in self.operands)
return com.pprint_thing(' {0} '.format(self.op).join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return _result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self):
types = self.operand_types
obj_dtype_set = frozenset([np.dtype('object')])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def isscalar(self):
return all(operand.isscalar for operand in self.operands)
@property
def is_datetime(self):
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if com.is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if com.is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
_special_case_arith_ops_funcs))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def _cast_inplace(terms, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
        The expression that should be cast.
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
return isinstance(obj, Term)
class BinOp(Op):
"""Hold a binary operator and its operands
Parameters
----------
op : str
left : Term or Op
right : Term or Op
"""
def __init__(self, op, lhs, rhs, **kwargs):
super(BinOp, self).__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError('Invalid binary operator {0!r}, valid'
' operators are {1}'.format(op, keys))
def __call__(self, env):
"""Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# handle truediv
if self.op == '/' and env.scope['truediv']:
self.func = op.truediv
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine, parser, term_type, eval_in_python):
"""Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == 'python':
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
right = self.rhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
res = pd.eval(self, local_dict=env, engine=engine,
parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
if self.encoding is not None:
encoder = partial(com.pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = com.pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if ((self.lhs.isscalar or self.rhs.isscalar) and
self.op in _bool_ops_dict and
(not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
issubclass(self.lhs.return_type, (bool, np.bool_))))):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
truediv : bool
Whether or not to use true division. With Python 3 this happens
regardless of the value of ``truediv``.
"""
def __init__(self, lhs, rhs, truediv, *args, **kwargs):
super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(self.op,
lhs.return_type,
rhs.return_type))
if truediv or PY3:
_cast_inplace(com.flatten(self), np.float_)
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""Hold a unary operator and its operands
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op, operand):
super(UnaryOp, self).__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError:
raise ValueError('Invalid unary operator {0!r}, valid operators '
'are {1}'.format(op, _unary_ops_syms))
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __unicode__(self):
return com.pprint_thing('{0}({1})'.format(self.op, self.operand))
@property
def return_type(self):
operand = self.operand
if operand.return_type == np.dtype('bool'):
return np.dtype('bool')
if (isinstance(operand, Op) and
(operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
return np.dtype('bool')
return np.dtype('int')
class MathCall(Op):
def __init__(self, func, args):
super(MathCall, self).__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
return self.func.func(*operands)
def __unicode__(self):
operands = map(str, self.operands)
return com.pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
def __init__(self, name):
if name not in _mathops:
raise ValueError("\"{0}\" is not a supported function".format(name))
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
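if __name__ == '__main__':
    # Hedged illustration (added; not part of the original module), assuming
    # this vendored pandas imports cleanly: the operator tables map eval()
    # symbols onto plain Python/NumPy callables, and the membership helpers
    # vectorise ``in``/``not in`` for pandas objects while falling back to
    # Python semantics for scalars.
    print(_binary_ops_dict['>='](3, 2))                  # True (operator.ge)
    print(_arith_ops_dict['**'](2, 10))                  # 1024 (operator.pow)
    print(_in(pd.Series([1, 2, 3]), [2, 3]).tolist())    # [False, True, True]
    print(_not_in(4, [2, 3]))                            # True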
|
gpl-2.0
|
francesco-mannella/dmp-esn
|
parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves/data/results/plot.py
|
18
|
1043
|
#!/usr/bin/env python
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
pathname = os.path.dirname(sys.argv[0])
if pathname:
os.chdir(pathname)
n_dim = None
trains = []
for fname in glob.glob("tl*"):
t = np.loadtxt(fname)
trains.append(t)
tests = []
for fname in glob.glob("tt*"):
t = np.loadtxt(fname)
tests.append(t)
trial_results= []
for fname in glob.glob("rtl*"):
t = np.loadtxt(fname)
trial_results.append(t)
test_results= []
for fname in glob.glob("rtt*"):
t = np.loadtxt(fname)
test_results.append(t)
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
for d in trains:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="blue", lw=3, alpha=0.5)
for d in tests:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="red", lw=3, alpha=0.5)
for d in trial_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[0,0,.5], lw=2)
for d in test_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[.5,0,0], lw=2)
plt.show()
|
gpl-2.0
|
davidyezsetz/kuma
|
vendor/packages/ipython/IPython/usage.py
|
7
|
31160
|
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2001-2004 Fernando Perez. <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
__doc__ = """
IPython -- An enhanced Interactive Python
=========================================
A Python shell with automatic history (input and output), dynamic object
introspection, easier configuration, command completion, access to the system
shell and more.
IPython can also be embedded in running programs. See EMBEDDING below.
USAGE
ipython [options] files
If invoked with no options, it executes all the files listed in
sequence and drops you into the interpreter while still acknowledging
any options you may have set in your ipythonrc file. This behavior is
different from standard Python, which when called as python -i will
only execute one file and will ignore your configuration setup.
Please note that some of the configuration options are not available at
the command line, simply because they are not practical here. Look into
your ipythonrc configuration file for details on those. This file
       is typically installed in the $HOME/.ipython directory.
For Windows users, $HOME resolves to C:\\Documents and
Settings\\YourUserName in most instances, and _ipython is used instead
of .ipython, since some Win32 programs have problems with dotted names
in directories.
In the rest of this text, we will refer to this directory as
IPYTHONDIR.
SPECIAL THREADING OPTIONS
The following special options are ONLY valid at the beginning of the
command line, and not later. This is because they control the initial-
ization of ipython itself, before the normal option-handling mechanism
is active.
-gthread, -qthread, -q4thread, -wthread, -pylab
Only ONE of these can be given, and it can only be given as the
first option passed to IPython (it will have no effect in any
other position). They provide threading support for the GTK, QT
and WXWidgets toolkits, and for the matplotlib library.
With any of the first four options, IPython starts running a
separate thread for the graphical toolkit's operation, so that
you can open and control graphical elements from within an
IPython command line, without blocking. All four provide
essentially the same functionality, respectively for GTK, QT3,
QT4 and WXWidgets (via their Python interfaces).
Note that with -wthread, you can additionally use the -wxversion
option to request a specific version of wx to be used. This
requires that you have the 'wxversion' Python module installed,
which is part of recent wxPython distributions.
If -pylab is given, IPython loads special support for the mat-
plotlib library (http://matplotlib.sourceforge.net), allowing
interactive usage of any of its backends as defined in the
user's .matplotlibrc file. It automatically activates GTK, QT
              or WX threading for IPython if the choice of matplotlib backend
requires it. It also modifies the %run command to correctly
execute (without blocking) any matplotlib-based script which
calls show() at the end.
-tk The -g/q/q4/wthread options, and -pylab (if matplotlib is
configured to use GTK, QT or WX), will normally block Tk
graphical interfaces. This means that when GTK, QT or WX
threading is active, any attempt to open a Tk GUI will result in
a dead window, and possibly cause the Python interpreter to
crash. An extra option, -tk, is available to address this
issue. It can ONLY be given as a SECOND option after any of the
above (-gthread, -qthread, q4thread, -wthread or -pylab).
If -tk is given, IPython will try to coordinate Tk threading
with GTK, QT or WX. This is however potentially unreliable, and
you will have to test on your platform and Python configuration
to determine whether it works for you. Debian users have
reported success, apparently due to the fact that Debian builds
all of Tcl, Tk, Tkinter and Python with pthreads support. Under
other Linux environments (such as Fedora Core 2/3), this option
has caused random crashes and lockups of the Python interpreter.
Under other operating systems (Mac OSX and Windows), you'll need
to try it to find out, since currently no user reports are
available.
There is unfortunately no way for IPython to determine at run-
time whether -tk will work reliably or not, so you will need to
do some experiments before relying on it for regular work.
A WARNING ABOUT SIGNALS AND THREADS
When any of the thread systems (GTK, QT or WX) are active, either
directly or via -pylab with a threaded backend, it is impossible to
interrupt long-running Python code via Ctrl-C. IPython can not pass
the KeyboardInterrupt exception (or the underlying SIGINT) across
threads, so any long-running process started from IPython will run to
completion, or will have to be killed via an external (OS-based)
mechanism.
To the best of my knowledge, this limitation is imposed by the Python
interpreter itself, and it comes from the difficulty of writing
portable signal/threaded code. If any user is an expert on this topic
and can suggest a better solution, I would love to hear about it. In
the IPython sources, look at the Shell.py module, and in particular at
the runcode() method.
REGULAR OPTIONS
After the above threading options have been given, regular options can
follow in any order. All options can be abbreviated to their shortest
non-ambiguous form and are case-sensitive. One or two dashes can be
used. Some options have an alternate short form, indicated after a |.
Most options can also be set from your ipythonrc configuration file.
See the provided examples for assistance. Options given on the comman-
dline override the values set in the ipythonrc file.
All options with a [no] prepended can be specified in negated form
(using -nooption instead of -option) to turn the feature off.
-h, --help
Show summary of options.
-pylab This can only be given as the first option passed to IPython (it
will have no effect in any other position). It adds special sup-
port for the matplotlib library (http://matplotlib.source-
forge.net), allowing interactive usage of any of its backends as
defined in the user's .matplotlibrc file. It automatically
activates GTK or WX threading for IPython if the choice of mat-
plotlib backend requires it. It also modifies the @run command
to correctly execute (without blocking) any matplotlib-based
script which calls show() at the end.
-autocall <val>
Make IPython automatically call any callable object even if you
didn't type explicit parentheses. For example, 'str 43' becomes
'str(43)' automatically. The value can be '0' to disable the
feature, '1' for 'smart' autocall, where it is not applied if
there are no more arguments on the line, and '2' for 'full'
autocall, where all callable objects are automatically called
(even if no arguments are present). The default is '1'.
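For example (a typical session with '-autocall 1'; the arrow showing
the rewritten line may be formatted slightly differently by your
version):
In [1]: str 43
------> str(43)
Out[1]: '43'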
-[no]autoindent
Turn automatic indentation on/off.
-[no]automagic
Make magic commands automatic (without needing their first char-
acter to be %). Type %magic at the IPython prompt for more
information.
-[no]autoedit_syntax
When a syntax error occurs after editing a file, automatically
open the file to the trouble causing line for convenient fixing.
-[no]banner
Print the initial information banner (default on).
-c <command>
Execute the given command string, and set sys.argv to ['c'].
This is similar to the -c option in the normal Python inter-
preter.
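For example, 'ipython -c "print 2+2"' runs the given statement at
startup, much as 'python -c' would.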
-cache_size|cs <n>
Size of the output cache (maximum number of entries to hold in
memory). The default is 1000, you can change it permanently in
your config file. Setting it to 0 completely disables the
caching system, and the minimum value accepted is 20 (if you
provide a value less than 20, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend
more time re-flushing a too small cache than working.
-classic|cl
Gives IPython a similar feel to the classic Python prompt.
-colors <scheme>
Color scheme for prompts and exception reporting. Currently
implemented: NoColor, Linux, and LightBG.
-[no]color_info
IPython can display information about objects via a set of func-
tions, and optionally can use colors for this, syntax highlight-
ing source code and various other elements. However, because
this information is passed through a pager (like 'less') and
many pagers get confused with color codes, this option is off by
default. You can test it and turn it on permanently in your
ipythonrc file if it works for you. As a reference, the 'less'
pager supplied with Mandrake 8.2 works ok, but that in RedHat
7.2 doesn't.
Test it and turn it on permanently if it works with your system.
The magic function @color_info allows you to toggle this inter-
actively for testing.
-[no]confirm_exit
Set to confirm when you try to exit IPython with an EOF (Con-
trol-D in Unix, Control-Z/Enter in Windows). Note that using the
magic functions @Exit or @Quit you can force a direct exit,
bypassing any confirmation.
-[no]debug
Show information about the loading process. Very useful to pin
down problems with your configuration files or to get details
about session restores.
-[no]deep_reload
IPython can use the deep_reload module which reloads changes in
modules recursively (it replaces the reload() function, so you
don't need to change anything to use it). deep_reload() forces a
full reload of modules whose code may have changed, which the
default reload() function does not.
When deep_reload is off, IPython will use the normal reload(),
but deep_reload will still be available as dreload(). This fea-
ture is off by default [which means that you have both normal
reload() and dreload()].
-editor <name>
Which editor to use with the @edit command. By default, IPython
will honor your EDITOR environment variable (if not set, vi is
the Unix default and notepad the Windows one). Since this editor
is invoked on the fly by IPython and is meant for editing small
code snippets, you may want to use a small, lightweight editor
here (in case your default EDITOR is something like Emacs).
-ipythondir <name>
The name of your IPython configuration directory IPYTHONDIR.
This can also be specified through the environment variable
IPYTHONDIR.
-log|l Generate a log file of all input. The file is named
ipython_log.py in your current directory (which prevents logs
from multiple IPython sessions from trampling each other). You
can use this to later restore a session by loading your logfile
as a file to be executed with option -logplay (see below).
-logfile|lf
Specify the name of your logfile.
-logplay|lp
Replay a previous log. For restoring a session as close as pos-
sible to the state you left it in, use this option (don't just
run the logfile). With -logplay, IPython will try to reconstruct
the previous working environment in full, not just execute the
commands in the logfile.
When a session is restored, logging is automatically turned on
again with the name of the logfile it was invoked with (it is
read from the log header). So once you've turned logging on for
a session, you can quit IPython and reload it as many times as
you want and it will continue to log its history and restore
from the beginning every time.
Caveats: there are limitations in this option. The history vari-
ables _i*,_* and _dh don't get restored properly. In the future
we will try to implement full session saving by writing and
retrieving a snapshot of the interpreter state, but earlier
attempts at this failed because of inherent limitations of
Python's Pickle module, so this may have to wait.
-[no]messages
Print messages which IPython collects about its startup process
(default on).
-[no]pdb
Automatically call the pdb debugger after every uncaught excep-
tion. If you are used to debugging using pdb, this puts you
automatically inside of it after any call (either in IPython or
in code called by it) which triggers an exception which goes
uncaught.
-[no]pprint
IPython can optionally use the pprint (pretty printer) module
for displaying results. pprint tends to give a nicer display of
nested data structures. If you like it, you can turn it on per-
manently in your config file (default off).
-profile|p <name>
Assume that your config file is ipythonrc-<name> (looks in cur-
rent dir first, then in IPYTHONDIR). This is a quick way to keep
and load multiple config files for different tasks, especially
if you use the include option of config files. You can keep a
basic IPYTHONDIR/ipythonrc file and then have other 'profiles'
which include this one and load extra things for particular
tasks. For example:
1) $HOME/.ipython/ipythonrc : load basic things you always want.
2) $HOME/.ipython/ipythonrc-math : load (1) and basic math-
related modules.
3) $HOME/.ipython/ipythonrc-numeric : load (1) and Numeric and
plotting modules.
Since it is possible to create an endless loop by having circu-
lar file inclusions, IPython will stop if it reaches 15 recur-
sive inclusions.
-prompt_in1|pi1 <string>
Specify the string used for input prompts. Note that if you are
using numbered prompts, the number is represented with a '\#' in
the string. Don't forget to quote strings with spaces embedded
in them. Default: 'In [\#]: '.
Most bash-like escapes can be used to customize IPython's
prompts, as well as a few additional ones which are IPython-spe-
cific. All valid prompt escapes are described in detail in the
Customization section of the IPython HTML/PDF manual.
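For example, 'ipython -prompt_in1 "In <\#>: "' should give prompts
such as 'In <1>: ', 'In <2>: ' and so on.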
-prompt_in2|pi2 <string>
Similar to the previous option, but used for the continuation
prompts. The special sequence '\D' is similar to '\#', but with
all digits replaced by dots (so you can have your continuation
prompt aligned with your input prompt). Default: ' .\D.: '
(note three spaces at the start for alignment with 'In [\#]').
-prompt_out|po <string>
String used for output prompts, also uses numbers like
prompt_in1. Default: 'Out[\#]:'.
-quick Start in bare bones mode (no config file loaded).
-rcfile <name>
Name of your IPython resource configuration file. Normally
IPython loads ipythonrc (from current directory) or
IPYTHONDIR/ipythonrc. If the loading of your config file fails,
IPython starts with a bare bones configuration (no modules
loaded at all).
-[no]readline
Use the readline library, which is needed to support name com-
pletion and command history, among other things. It is enabled
by default, but may cause problems for users of X/Emacs in
Python comint or shell buffers.
Note that emacs 'eterm' buffers (opened with M-x term) support
IPython's readline and syntax coloring fine, only 'emacs' (M-x
shell and C-c !) buffers do not.
-screen_length|sl <n>
Number of lines of your screen. This is used to control print-
ing of very long strings. Strings longer than this number of
lines will be sent through a pager instead of directly printed.
The default value for this is 0, which means IPython will auto-
detect your screen size every time it needs to print certain
potentially long strings (this doesn't change the behavior of
the 'print' keyword, it's only triggered internally). If for
some reason this isn't working well (it needs curses support),
specify it yourself. Otherwise don't change the default.
-separate_in|si <string>
Separator before input prompts. Default: '\n'.
-separate_out|so <string>
Separator before output prompts. Default: 0 (nothing).
-separate_out2|so2 <string>
Separator after output prompts. Default: 0 (nothing).
-nosep Shorthand for '-separate_in 0 -separate_out 0 -separate_out2 0'.
Simply removes all input/output separators.
-upgrade
Allows you to upgrade your IPYTHONDIR configuration when you
install a new version of IPython. Since new versions may
include new command lines options or example files, this copies
updated ipythonrc-type files. However, it backs up (with a .old
extension) all files which it overwrites so that you can merge
back any customizations you might have in your personal files.
-Version
Print version information and exit.
-wxversion <string>
Select a specific version of wxPython (used in conjunction with
-wthread). Requires the wxversion module, part of recent
wxPython distributions.
-xmode <modename>
Mode for exception reporting. The valid modes are Plain, Con-
text, and Verbose.
- Plain: similar to python's normal traceback printing.
- Context: prints 5 lines of context source code around each
line in the traceback.
- Verbose: similar to Context, but additionally prints the vari-
ables currently visible where the exception happened (shortening
their strings if too long). This can potentially be very slow,
if you happen to have a huge data structure whose string repre-
sentation is complex to compute. Your computer may appear to
freeze for a while with cpu usage at 100%. If this occurs, you
can cancel the traceback with Ctrl-C (maybe hitting it more than
once).
EMBEDDING
It is possible to start an IPython instance inside your own Python pro-
grams. In the documentation example files there are some illustrations
on how to do this.
This feature allows you to dynamically evaluate the state of your
code, operate with your variables, analyze them, etc. Note however
that any changes you make to values while in the shell do NOT propagate
back to the running code, so it is safe to modify your values because
you won't break your code in bizarre ways by doing so.
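As a minimal sketch (assuming the classic IPython.Shell.IPShellEmbed API of
this IPython generation; see the documentation example files mentioned above
for the authoritative version):
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
def my_function():
    x = 42
    ipshell()   # opens an interactive IPython shell with x visible
my_function()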
"""
cmd_line_usage = __doc__
#---------------------------------------------------------------------------
interactive_usage = """
IPython -- An enhanced Interactive Python
=========================================
IPython offers a combination of convenient shell features, special commands
and a history mechanism for both input (command history) and output (results
caching, similar to Mathematica). It is intended to be a fully compatible
replacement for the standard Python interpreter, while offering vastly
improved functionality and flexibility.
At your system command line, type 'ipython -help' to see the command line
options available. This document only describes interactive features.
Warning: IPython relies on the existence of a global variable called __IP which
controls the shell itself. If you redefine __IP to anything, bizarre behavior
will quickly occur.
MAIN FEATURES
* Access to the standard Python help. As of Python 2.1, a help system is
available with access to object docstrings and the Python manuals. Simply
type 'help' (no quotes) to access it.
* Magic commands: type %magic for information on the magic subsystem.
* System command aliases, via the %alias command or the ipythonrc config file.
* Dynamic object information:
Typing ?word or word? prints detailed information about an object. If
certain strings in the object are too long (docstrings, code, etc.) they get
snipped in the center for brevity.
Typing ??word or word?? gives access to the full information without
snipping long strings. Long strings are sent to the screen through the less
pager if longer than the screen, printed otherwise.
The ?/?? system gives access to the full source code for any object (if
available), shows function prototypes and other useful information.
If you just want to see an object's docstring, type '%pdoc object' (without
quotes, and without % if you have automagic on).
Both %pdoc and ?/?? give you access to documentation even on things which are
not explicitly defined. Try for example typing {}.get? or after import os,
type os.path.abspath??. The magic functions %pdef, %source and %file operate
similarly.
* Completion in the local namespace, by typing TAB at the prompt.
At any time, hitting tab will complete any available python commands or
variable names, and show you a list of the possible completions if there's
no unambiguous one. It will also complete filenames in the current directory.
This feature requires the readline and rlcompleter modules, so it won't work
if your Python lacks readline support (such as under Windows).
* Search previous command history in two ways (also requires readline):
- Start typing, and then use Ctrl-p (previous,up) and Ctrl-n (next,down) to
search through only the history items that match what you've typed so
far. If you use Ctrl-p/Ctrl-n at a blank prompt, they just behave like
normal arrow keys.
- Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
your history for lines that match what you've typed so far, completing as
much as it can.
* Persistent command history across sessions (readline required).
* Logging of input with the ability to save and restore a working session.
* System escape with !. Typing !ls will run 'ls' in the current directory.
* The reload command does a 'deep' reload of a module: changes made to the
module since you imported will actually be available without having to exit.
* Verbose and colored exception traceback printouts. See the magic xmode and
xcolor functions for details (just type %magic).
* Input caching system:
IPython offers numbered prompts (In/Out) with input and output caching. All
input is saved and can be retrieved as variables (besides the usual arrow
key recall).
The following GLOBAL variables always exist (so don't overwrite them!):
_i: stores previous input.
_ii: next previous.
_iii: next-next previous.
_ih : a list of all input; _ih[n] is the input from line n.
Additionally, global variables named _i<n> are dynamically created (<n>
being the prompt counter), such that _i<n> == _ih[<n>]
For example, what you typed at prompt 14 is available as _i14 and _ih[14].
You can create macros which contain multiple input lines from this history,
for later re-execution, with the %macro function.
The history function %hist allows you to see any part of your input history
by printing a range of the _i variables. Note that inputs which contain
magic functions (%) appear in the history with a prepended comment. This is
because they aren't really valid Python code, so you can't exec them.
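A short sketch of how this is typically used (the line numbers are of
course arbitrary):
In [14]: x = 2 + 2
In [15]: exec _i14        # re-runs the input from line 14
In [16]: %macro plus2 14  # bundles line 14 into a reusable macro 'plus2'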
* Output caching system:
For output that is returned from actions, a system similar to the input
cache exists but using _ instead of _i. Only actions that produce a result
(NOT assignments, for example) are cached. If you are familiar with
Mathematica, IPython's _ variables behave exactly like Mathematica's %
variables.
The following GLOBAL variables always exist (so don't overwrite them!):
_ (one underscore): previous output.
__ (two underscores): next previous.
___ (three underscores): next-next previous.
Global variables named _<n> are dynamically created (<n> being the prompt
counter), such that the result of output <n> is always available as _<n>.
Finally, a global dictionary named _oh exists with entries for all lines
which generated output.
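For example:
In [1]: 10 * 3
Out[1]: 30
In [2]: _ + 5
Out[2]: 35
In [3]: _1 + _2
Out[3]: 65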
* Directory history:
Your history of visited directories is kept in the global list _dh, and the
magic %cd command can be used to go to any entry in that list.
* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
1. Auto-parentheses
Callable objects (i.e. functions, methods, etc) can be invoked like
this (notice the commas between the arguments):
>>> callable_ob arg1, arg2, arg3
and the input will be translated to this:
--> callable_ob(arg1, arg2, arg3)
You can force auto-parentheses by using '/' as the first character
of a line. For example:
>>> /globals # becomes 'globals()'
Note that the '/' MUST be the first character on the line! This
won't work:
>>> print /globals # syntax error
In most cases the automatic algorithm should work, so you should
rarely need to explicitly invoke /. One notable exception is if you
are trying to call a function with a list of tuples as arguments (the
parenthesis will confuse IPython):
In [1]: zip (1,2,3),(4,5,6) # won't work
but this will work:
In [2]: /zip (1,2,3),(4,5,6)
------> zip ((1,2,3),(4,5,6))
Out[2]= [(1, 4), (2, 5), (3, 6)]
IPython tells you that it has altered your command line by
displaying the new command line preceded by -->. e.g.:
In [18]: callable list
-------> callable (list)
2. Auto-Quoting
You can force auto-quoting of a function's arguments by using ',' as
the first character of a line. For example:
>>> ,my_function /home/me # becomes my_function("/home/me")
If you use ';' instead, the whole argument is quoted as a single
string (while ',' splits on whitespace):
>>> ,my_function a b c # becomes my_function("a","b","c")
>>> ;my_function a b c # becomes my_function("a b c")
Note that the ',' MUST be the first character on the line! This
won't work:
>>> x = ,my_function /home/me # syntax error
"""
quick_reference = r"""
IPython -- An enhanced Interactive Python - Quick Reference Card
================================================================
obj?, obj?? : Get help, or more help for object (also works as
?obj, ??obj).
?foo.*abc* : List names in 'foo' containing 'abc' in them.
%magic : Information about IPython's 'magic' % functions.
Magic functions are prefixed by %, and typically take their arguments without
parentheses, quotes or even commas for convenience.
Example magic function calls:
%alias d ls -F : 'd' is now an alias for 'ls -F'
alias d ls -F : Works if 'alias' not a python name
alist = %alias : Get list of aliases to 'alist'
cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
%cd?? : See help AND source for magic %cd
System commands:
!cp a.txt b/ : System command escape, calls os.system()
cp a.txt b/ : after %rehashx, most system commands work without !
cp ${f}.txt $bar : Variable expansion in magics and system commands
files = !ls /usr : Capture system command output
files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
History:
_i, _ii, _iii : Previous, next previous, next next previous input
_i4, _ih[2:5] : Input history line 4, lines 2-4
exec _i81 : Execute input history line #81 again
%rep 81 : Edit input history line #81
_, __, ___ : previous, next previous, next next previous output
_dh : Directory history
_oh : Output history
%hist : Command history. '%hist -g foo' search history for 'foo'
Autocall:
f 1,2 : f(1,2)
/f 1,2 : f(1,2) (forced autoparen)
,f 1 2 : f("1","2")
;f 1 2 : f("1 2")
Remember: TAB completion works in many contexts, not just file names
or python names.
The following magic functions are currently available:
"""
|
mpl-2.0
|
kevindehecker/paparazzi
|
sw/airborne/test/ahrs/ahrs_utils.py
|
86
|
4923
|
#! /usr/bin/env python
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import subprocess
import numpy as np
import matplotlib.pyplot as plt
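# run_simulation() builds the run_ahrs_on_synth test harness for the requested
# AHRS type (make AHRS_TYPE=AHRS_TYPE_<ahrs_type>, plus any extra make arguments
# in build_opt), runs it on synthetic trajectory traj_nb, and returns the parsed
# output as a numpy record array.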
def run_simulation(ahrs_type, build_opt, traj_nb):
print("\nBuilding ahrs")
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_" + ahrs_type] + build_opt
#print(args)
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print(" # " + i, end=' ')
print()
print("Running simulation")
print(" using traj " + str(traj_nb))
p = subprocess.Popen(args=["./run_ahrs_on_synth", str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print(" "+i, end=' ')
# print("\n")
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')]
mydescr = np.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print(" " + line, end=' ')
else:
fields = line.strip().split(' ')
#print(fields)
for i, number in enumerate(fields):
data[i].append(number)
print()
for i in xrange(len(mydescr)):
data[i] = np.cast[mydescr[i]](data[i])
return np.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, label, sim_res):
print("Plotting Results")
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=label)
plt.ylabel('degrees')
plt.title('phi')
plt.legend()
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_ahrs, lsty)
plt.title('theta')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_ahrs, lsty)
plt.title('psi')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
plt.ylabel('degres/s')
plt.title('p')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
plt.title('q')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
plt.title('r')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
plt.ylabel('degres/s')
plt.xlabel('time in s')
plt.title('bp')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
plt.xlabel('time in s')
plt.title('bq')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
plt.xlabel('time in s')
plt.title('br')
if plot_true_state:
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_true, 'r--')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_true, 'r--')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_true, 'r--')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_true, 'r--')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_true, 'r--')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_true, 'r--')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_true, 'r--')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show()
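# Minimal usage sketch; the AHRS type string, build options and trajectory
# number below are hypothetical examples, and the valid values depend on the
# Paparazzi tree being tested.
if __name__ == '__main__':
    sim = run_simulation('ICQ', [], 1)              # hypothetical AHRS type / trajectory
    plot_simulation_results(True, 'b-', 'ICQ', sim)
    show_plot()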
|
gpl-2.0
|
wathen/PhD
|
MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/GeneralisedEigen/NoBC/3d/MHDfluid.py
|
1
|
12701
|
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
from dolfin import *
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDallatonce as MHDpreconditioner
import GeneralisedEigenvalues as GE
import os
m = 4
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
Dimensions = np.zeros((m-1,4))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx+0
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
# parameters["form_compiler"]["quadrature_degree"] = 6
# parameters = CP.ParameterSetup()
mesh = UnitCubeMesh(nn,nn,nn)
order = 1
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)+VectorFunctionSpace(mesh, "B",4)
Pressure = FunctionSpace(mesh, "CG", order)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Magnetic, Pressure, Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Magnetic.dim(), Pressure.dim(), Lagrange.dim()]
Dimensions[xx-1,:] = np.array([Velocity.dim(), Pressure.dim(), Magnetic.dim(),Lagrange.dim()])
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD3D(4,1)
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(1),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, b, p, r) = TrialFunctions(W)
(v, c, q, s) = TestFunctions(W)
kappa = 1.0
Mu_m = 10.0
MU = 1.0/1
F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
# MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n")
HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, 1e-4, params)
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New")
# plot(p_k, interactive = True)
b_t = TrialFunction(Velocity)
c_t = TestFunction(Velocity)
#print assemble(inner(b,c)*dx).array().shape
#print mat
#ShiftedMass = assemble(inner(mat*b,c)*dx)
#as_vector([inner(b,c)[0]*b_k[0],inner(b,c)[1]*(-b_k[1])])
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
# pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x = Iter.u_prev(u_k,b_k,p_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
# plot(b_k)
IterType = 'Full'
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType,"CG", SaddlePoint = "Yes")
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params,"CG",SaddlePoint = "Yes")
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(1),Expression(("0.0","0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 1 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
# FSpaces = [Velocity,Magnetic,Pressure,Lagrange]
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
# u = b.duplicate()
# P = CP.Assemble(PP)
u_is = PETSc.IS().createGeneral(range(Velocity.dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-3
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
kspMASS = PrecondSetup.Masstest(Lagrange, p0, 1e-6)
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
tic()
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
FF = AA.sparray()[0:dim[0],0:dim[0]]
A,b = CP.Assemble(AA,bb)
# if iter == 1
if iter == 1:
u = b.duplicate()
F = A.getSubMatrix(u_is,u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
else:
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
F = A.getSubMatrix(u_is,u_is)
n = FacetNormal(mesh)
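# 'mat' below is the linearised Lorentz-coupling coefficient
# |b_k|^2 I - b_k b_k^T (scaled by kappa/Mu_m when assembled into 'a'),
# used to build the shifted mass matrix for the preconditioner.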
mat = as_matrix([[b_k[2]*b_k[2]+b_k[1]*b_k[1],-b_k[1]*b_k[0],-b_k[0]*b_k[2]],
[-b_k[1]*b_k[0],b_k[0]*b_k[0]+b_k[2]*b_k[2],-b_k[2]*b_k[1]],
[-b_k[0]*b_k[2],-b_k[1]*b_k[2],b_k[0]*b_k[0]+b_k[1]*b_k[1]]])
a = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1/2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1/2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh())
a = kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh())
ShiftedMass = assemble(a)
#bcu.apply(ShiftedMass)
#MO.StoreMatrix(AA.sparray()[0:dim[0],0:dim[0]]+ShiftedMass.sparray(),"A")
FF = CP.Assemble(ShiftedMass)
kspF = NSprecondSetup.LSCKSPnonlinear(FF)
# if iter == 1:
if iter == 1:
u = b.duplicate()
print ("{:40}").format("MHD assemble, time: "), " ==> ",("{:4f}").format(toc()), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
print "Inititial guess norm: ", u.norm()
ksp = PETSc.KSP()
ksp.create(comm=PETSc.COMM_WORLD)
pc = ksp.getPC()
ksp.setType('gmres')
pc.setType('python')
pc.setType(PETSc.PC.Type.PYTHON)
# FSpace = [Velocity,Magnetic,Pressure,Lagrange]
reshist = {}
def monitor(ksp, its, fgnorm):
reshist[its] = fgnorm
print its," OUTER:", fgnorm
# ksp.setMonitor(monitor)
ksp.max_it = 1000
FFSS = [Velocity,Magnetic,Pressure,Lagrange]
pc.setPythonContext(MHDpreconditioner.InnerOuterMAGNETICinverse(FFSS,kspF, KSPlinearfluids[0], KSPlinearfluids[1],Fp, HiptmairMatrices[3], HiptmairMatrices[4], HiptmairMatrices[2], HiptmairMatrices[0], HiptmairMatrices[1], HiptmairMatrices[6],1e-6,FF))
# OptDB = PETSc.Options()
# OptDB['pc_factor_mat_solver_package'] = "mumps"
# OptDB['pc_factor_mat_ordering_type'] = "rcm"
# ksp.setFromOptions()
ksp.max_it = 5
scale = b.norm()
b = b/scale
ksp.setOperators(A,A)
stime = time.time()
ksp.solve(b,u)
Soltime = time.time()- stime
NSits += ksp.its
# Mits +=dodim
u = u*scale
SolutionTime = SolutionTime +Soltime
AA = assemble(ns+maxwell+CoupleTerm)
AA = CP.Assemble(AA)
MO.PrintStr("Number of iterations ="+str(ksp.its),60,"+","\n\n","\n\n")
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter, SaddlePoint = "Yes")
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
uOld= np.concatenate((u_k.vector().array(),b_k.vector().array(),p_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
PCD = [MatrixLinearFluids[0], MatrixLinearFluids[1], Fp]
if iter == 1:
t = TestFunction(Lagrange)
q = TrialFunction(Lagrange)
M = CP.Assemble(assemble(inner((t),(q))*dx))
t = TestFunction(Magnetic)
q = TrialFunction(Magnetic)
#AA = CP.Assemble(assemble(inner(grad(t),grad(q))*dx))
print W
LL = HiptmairMatrices[4].getOperators()[0]
print LL
A = assemble(maxwell+ns+CoupleTerm)
A = CP.Assemble(A)
A, P, Pinner, Papprox = GE.eigensORIG(AA,A, W, PCD, M)
#A, P = GE.eigensApprox(A, W, PCD, HiptmairMatrices[4],HiptmairMatrices[6], FF)
#A, P = GE.eigens(A, W, PCD, kspMASS)
its = GE.ShiftedCDtest(FF)
path = 'TESTmatrix_kappa='+str(kappa)+'_nu_m='+str(Mu_m)+'_nu='+str(MU)
if not os.path.exists(path):
os.makedirs(path)
os.chdir(path)
Mits += its
MO.StoreMatrix(CP.PETSc2Scipy(M),"M_"+str(iter)+"_"+str(nn))
#MO.StoreMatrix(CP.PETSc2Scipy(AA),"LL_"+str(iter)+"_"+str(nn))
MO.StoreMatrix(A,"A_"+str(iter)+"_"+str(nn))
MO.StoreMatrix(CP.PETSc2Scipy(FF),"SM_"+str(iter)+"_"+str(nn))
MO.StoreMatrix(P,"P_"+str(iter)+"_"+str(nn))
MO.StoreMatrix(Pinner,"Pinner_"+str(iter)+"_"+str(nn))
MO.StoreMatrix(Papprox,"Papprox_"+str(iter)+"_"+str(nn))
os.chdir('..')
XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
print SolTime
import pandas as pd
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
print Dimensions
np.savetxt('dimensions.t',Dimensions)
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(interpolate(ue,Velocity))
# plot(p_k)
# plot(interpolate(pe,Pressure))
# plot(b_k)
# plot(interpolate(be,Magnetic))
# plot(r_k)
# plot(interpolate(re,Lagrange))
# interactive()
interactive()
|
mit
|
louispotok/pandas
|
pandas/io/sas/sas7bdat.py
|
3
|
27470
|
"""
Read SAS7BDAT files
Based on code written by Jared Hobbs:
https://bitbucket.org/jaredhobbs/sas7bdat
See also:
https://github.com/BioStatMatt/sas7bdat
Partial documentation of the file format:
https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
import pandas as pd
from pandas import compat
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.errors import EmptyDataError
import numpy as np
import struct
import pandas.io.sas.sas_constants as const
from pandas.io.sas._sas import Parser
class _subheader_pointer(object):
pass
class _column(object):
pass
# SAS7BDAT represents a SAS data file in SAS7BDAT format.
class SAS7BDATReader(BaseIterator):
"""
Read SAS files in SAS7BDAT format.
Parameters
----------
path_or_buf : path name or buffer
Name of SAS file or file-like object pointing to SAS file
contents.
index : column identifier, defaults to None
Column to use as index.
convert_dates : boolean, defaults to True
Attempt to convert dates to Pandas datetime values. Note that
some rarely used SAS date formats may be unsupported.
blank_missing : boolean, defaults to True
Convert empty strings to missing values (SAS uses blanks to
indicate missing character variables).
chunksize : int, defaults to None
Return SAS7BDATReader object for iteration; returns chunks
with the given number of rows.
encoding : string, defaults to None
String encoding.
convert_text : bool, defaults to True
If False, text variables are left as raw bytes.
convert_header_text : bool, defaults to True
If False, header text, including column names, is left as raw
bytes.
"""
def __init__(self, path_or_buf, index=None, convert_dates=True,
blank_missing=True, chunksize=None, encoding=None,
convert_text=True, convert_header_text=True):
self.index = index
self.convert_dates = convert_dates
self.blank_missing = blank_missing
self.chunksize = chunksize
self.encoding = encoding
self.convert_text = convert_text
self.convert_header_text = convert_header_text
self.default_encoding = "latin-1"
self.compression = ""
self.column_names_strings = []
self.column_names = []
self.column_types = []
self.column_formats = []
self.columns = []
self._current_page_data_subheader_pointers = []
self._cached_page = None
self._column_data_lengths = []
self._column_data_offsets = []
self._current_row_in_file_index = 0
self._current_row_on_page_index = 0
self._current_row_in_file_index = 0
self._path_or_buf, _, _, _ = get_filepath_or_buffer(path_or_buf)
if isinstance(self._path_or_buf, compat.string_types):
self._path_or_buf = open(self._path_or_buf, 'rb')
self.handle = self._path_or_buf
self._get_properties()
self._parse_metadata()
def close(self):
try:
self.handle.close()
except AttributeError:
pass
def _get_properties(self):
# Check magic number
self._path_or_buf.seek(0)
self._cached_page = self._path_or_buf.read(288)
if self._cached_page[0:len(const.magic)] != const.magic:
self.close()
raise ValueError("magic number mismatch (not a SAS file?)")
# Get alignment information
align1, align2 = 0, 0
buf = self._read_bytes(const.align_1_offset, const.align_1_length)
if buf == const.u64_byte_checker_value:
align2 = const.align_2_value
self.U64 = True
self._int_length = 8
self._page_bit_offset = const.page_bit_offset_x64
self._subheader_pointer_length = const.subheader_pointer_length_x64
else:
self.U64 = False
self._page_bit_offset = const.page_bit_offset_x86
self._subheader_pointer_length = const.subheader_pointer_length_x86
self._int_length = 4
buf = self._read_bytes(const.align_2_offset, const.align_2_length)
if buf == const.align_1_checker_value:
align1 = const.align_2_value
total_align = align1 + align2
# Get endianness information
buf = self._read_bytes(const.endianness_offset,
const.endianness_length)
if buf == b'\x01':
self.byte_order = "<"
else:
self.byte_order = ">"
# Get encoding information
buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
if buf in const.encoding_names:
self.file_encoding = const.encoding_names[buf]
else:
self.file_encoding = "unknown (code=%s)" % str(buf)
# Get platform information
buf = self._read_bytes(const.platform_offset, const.platform_length)
if buf == b'1':
self.platform = "unix"
elif buf == b'2':
self.platform = "windows"
else:
self.platform = "unknown"
buf = self._read_bytes(const.dataset_offset, const.dataset_length)
self.name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.name = self.name.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.file_type_offset, const.file_type_length)
self.file_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.file_type = self.file_type.decode(
self.encoding or self.default_encoding)
# Timestamp is epoch 01/01/1960
epoch = pd.datetime(1960, 1, 1)
x = self._read_float(const.date_created_offset + align1,
const.date_created_length)
self.date_created = epoch + pd.to_timedelta(x, unit='s')
x = self._read_float(const.date_modified_offset + align1,
const.date_modified_length)
self.date_modified = epoch + pd.to_timedelta(x, unit='s')
self.header_length = self._read_int(const.header_size_offset + align1,
const.header_size_length)
# Read the rest of the header into cached_page.
buf = self._path_or_buf.read(self.header_length - 288)
self._cached_page += buf
if len(self._cached_page) != self.header_length:
self.close()
raise ValueError("The SAS7BDAT file appears to be truncated.")
self._page_length = self._read_int(const.page_size_offset + align1,
const.page_size_length)
self._page_count = self._read_int(const.page_count_offset + align1,
const.page_count_length)
buf = self._read_bytes(const.sas_release_offset + total_align,
const.sas_release_length)
self.sas_release = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.sas_release = self.sas_release.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.sas_server_type_offset + total_align,
const.sas_server_type_length)
self.server_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.server_type = self.server_type.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_version_number_offset + total_align,
const.os_version_number_length)
self.os_version = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_version = self.os_version.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_name_offset + total_align,
const.os_name_length)
buf = buf.rstrip(b'\x00 ')
if len(buf) > 0:
self.os_name = buf.decode(self.encoding or self.default_encoding)
else:
buf = self._read_bytes(const.os_maker_offset + total_align,
const.os_maker_length)
self.os_name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_name = self.os_name.decode(
self.encoding or self.default_encoding)
def __next__(self):
da = self.read(nrows=self.chunksize or 1)
if da is None:
raise StopIteration
return da
# Read a single float of the given width (4 or 8).
def _read_float(self, offset, width):
if width not in (4, 8):
self.close()
raise ValueError("invalid float width")
buf = self._read_bytes(offset, width)
fd = "f" if width == 4 else "d"
return struct.unpack(self.byte_order + fd, buf)[0]
# Read a single signed integer of the given width (1, 2, 4 or 8).
def _read_int(self, offset, width):
if width not in (1, 2, 4, 8):
self.close()
raise ValueError("invalid int width")
buf = self._read_bytes(offset, width)
it = {1: "b", 2: "h", 4: "l", 8: "q"}[width]
iv = struct.unpack(self.byte_order + it, buf)[0]
return iv
def _read_bytes(self, offset, length):
if self._cached_page is None:
self._path_or_buf.seek(offset)
buf = self._path_or_buf.read(length)
if len(buf) < length:
self.close()
msg = "Unable to read {:d} bytes from file position {:d}."
raise ValueError(msg.format(length, offset))
return buf
else:
if offset + length > len(self._cached_page):
self.close()
raise ValueError("The cached page is too small.")
return self._cached_page[offset:offset + length]
def _parse_metadata(self):
done = False
while not done:
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
break
if len(self._cached_page) != self._page_length:
self.close()
raise ValueError(
"Failed to read a meta data page from the SAS file.")
done = self._process_page_meta()
def _process_page_meta(self):
self._read_page_header()
pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types
if self._current_page_type in pt:
self._process_page_metadata()
return ((self._current_page_type in [256] + const.page_mix_types) or
(self._current_page_data_subheader_pointers is not None))
def _read_page_header(self):
bit_offset = self._page_bit_offset
tx = const.page_type_offset + bit_offset
self._current_page_type = self._read_int(tx, const.page_type_length)
tx = const.block_count_offset + bit_offset
self._current_page_block_count = self._read_int(
tx, const.block_count_length)
tx = const.subheader_count_offset + bit_offset
self._current_page_subheaders_count = (
self._read_int(tx, const.subheader_count_length))
def _process_page_metadata(self):
bit_offset = self._page_bit_offset
for i in range(self._current_page_subheaders_count):
pointer = self._process_subheader_pointers(
const.subheader_pointers_offset + bit_offset, i)
if pointer.length == 0:
continue
if pointer.compression == const.truncated_subheader_id:
continue
subheader_signature = self._read_subheader_signature(
pointer.offset)
subheader_index = (
self._get_subheader_index(subheader_signature,
pointer.compression, pointer.ptype))
self._process_subheader(subheader_index, pointer)
def _get_subheader_index(self, signature, compression, ptype):
index = const.subheader_signature_to_index.get(signature)
if index is None:
f1 = ((compression == const.compressed_subheader_id) or
(compression == 0))
f2 = (ptype == const.compressed_subheader_type)
if (self.compression != "") and f1 and f2:
index = const.SASIndex.data_subheader_index
else:
self.close()
raise ValueError("Unknown subheader signature")
return index
def _process_subheader_pointers(self, offset, subheader_pointer_index):
subheader_pointer_length = self._subheader_pointer_length
total_offset = (offset +
subheader_pointer_length * subheader_pointer_index)
subheader_offset = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_length = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_compression = self._read_int(total_offset, 1)
total_offset += 1
subheader_type = self._read_int(total_offset, 1)
x = _subheader_pointer()
x.offset = subheader_offset
x.length = subheader_length
x.compression = subheader_compression
x.ptype = subheader_type
return x
def _read_subheader_signature(self, offset):
subheader_signature = self._read_bytes(offset, self._int_length)
return subheader_signature
def _process_subheader(self, subheader_index, pointer):
offset = pointer.offset
length = pointer.length
if subheader_index == const.SASIndex.row_size_index:
processor = self._process_rowsize_subheader
elif subheader_index == const.SASIndex.column_size_index:
processor = self._process_columnsize_subheader
elif subheader_index == const.SASIndex.column_text_index:
processor = self._process_columntext_subheader
elif subheader_index == const.SASIndex.column_name_index:
processor = self._process_columnname_subheader
elif subheader_index == const.SASIndex.column_attributes_index:
processor = self._process_columnattributes_subheader
elif subheader_index == const.SASIndex.format_and_label_index:
processor = self._process_format_subheader
elif subheader_index == const.SASIndex.column_list_index:
processor = self._process_columnlist_subheader
elif subheader_index == const.SASIndex.subheader_counts_index:
processor = self._process_subheader_counts
elif subheader_index == const.SASIndex.data_subheader_index:
self._current_page_data_subheader_pointers.append(pointer)
return
else:
raise ValueError("unknown subheader index")
processor(offset, length)
def _process_rowsize_subheader(self, offset, length):
int_len = self._int_length
lcs_offset = offset
lcp_offset = offset
if self.U64:
lcs_offset += 682
lcp_offset += 706
else:
lcs_offset += 354
lcp_offset += 378
self.row_length = self._read_int(
offset + const.row_length_offset_multiplier * int_len, int_len)
self.row_count = self._read_int(
offset + const.row_count_offset_multiplier * int_len, int_len)
self.col_count_p1 = self._read_int(
offset + const.col_count_p1_multiplier * int_len, int_len)
self.col_count_p2 = self._read_int(
offset + const.col_count_p2_multiplier * int_len, int_len)
mx = const.row_count_on_mix_page_offset_multiplier * int_len
self._mix_page_row_count = self._read_int(offset + mx, int_len)
self._lcs = self._read_int(lcs_offset, 2)
self._lcp = self._read_int(lcp_offset, 2)
def _process_columnsize_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
self.column_count = self._read_int(offset, int_len)
if (self.col_count_p1 + self.col_count_p2 !=
self.column_count):
print("Warning: column count mismatch (%d + %d != %d)\n",
self.col_count_p1, self.col_count_p2, self.column_count)
# Unknown purpose
def _process_subheader_counts(self, offset, length):
pass
def _process_columntext_subheader(self, offset, length):
offset += self._int_length
text_block_size = self._read_int(offset, const.text_block_size_length)
buf = self._read_bytes(offset, text_block_size)
cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
cname = cname_raw
if self.convert_header_text:
cname = cname.decode(self.encoding or self.default_encoding)
self.column_names_strings.append(cname)
if len(self.column_names_strings) == 1:
compression_literal = ""
for cl in const.compression_literals:
if cl in cname_raw:
compression_literal = cl
self.compression = compression_literal
offset -= self._int_length
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
compression_literal = buf.rstrip(b"\x00")
if compression_literal == "":
self._lcs = 0
offset1 = offset + 32
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif compression_literal == const.rle_compression:
offset1 = offset + 40
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif self._lcs > 0:
self._lcp = 0
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcs)
self.creator_proc = buf[0:self._lcp]
if self.convert_header_text:
if hasattr(self, "creator_proc"):
self.creator_proc = self.creator_proc.decode(
self.encoding or self.default_encoding)
def _process_columnname_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
column_name_pointers_count = (length - 2 * int_len - 12) // 8
for i in range(column_name_pointers_count):
text_subheader = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_text_subheader_offset
col_name_offset = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_offset_offset
col_name_length = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_length_offset
idx = self._read_int(
text_subheader, const.column_name_text_subheader_length)
col_offset = self._read_int(
col_name_offset, const.column_name_offset_length)
col_len = self._read_int(
col_name_length, const.column_name_length_length)
name_str = self.column_names_strings[idx]
self.column_names.append(name_str[col_offset:col_offset + col_len])
def _process_columnattributes_subheader(self, offset, length):
int_len = self._int_length
column_attributes_vectors_count = (
length - 2 * int_len - 12) // (int_len + 8)
self.column_types = np.empty(
column_attributes_vectors_count, dtype=np.dtype('S1'))
self._column_data_lengths = np.empty(
column_attributes_vectors_count, dtype=np.int64)
self._column_data_offsets = np.empty(
column_attributes_vectors_count, dtype=np.int64)
for i in range(column_attributes_vectors_count):
col_data_offset = (offset + int_len +
const.column_data_offset_offset +
i * (int_len + 8))
col_data_len = (offset + 2 * int_len +
const.column_data_length_offset +
i * (int_len + 8))
col_types = (offset + 2 * int_len +
const.column_type_offset + i * (int_len + 8))
x = self._read_int(col_data_offset, int_len)
self._column_data_offsets[i] = x
x = self._read_int(col_data_len, const.column_data_length_length)
self._column_data_lengths[i] = x
x = self._read_int(col_types, const.column_type_length)
if x == 1:
self.column_types[i] = b'd'
else:
self.column_types[i] = b's'
def _process_columnlist_subheader(self, offset, length):
# unknown purpose
pass
def _process_format_subheader(self, offset, length):
int_len = self._int_length
text_subheader_format = (
offset +
const.column_format_text_subheader_index_offset +
3 * int_len)
col_format_offset = (offset +
const.column_format_offset_offset +
3 * int_len)
col_format_len = (offset +
const.column_format_length_offset +
3 * int_len)
text_subheader_label = (
offset +
const.column_label_text_subheader_index_offset +
3 * int_len)
col_label_offset = (offset +
const.column_label_offset_offset +
3 * int_len)
col_label_len = offset + const.column_label_length_offset + 3 * int_len
x = self._read_int(text_subheader_format,
const.column_format_text_subheader_index_length)
format_idx = min(x, len(self.column_names_strings) - 1)
format_start = self._read_int(
col_format_offset, const.column_format_offset_length)
format_len = self._read_int(
col_format_len, const.column_format_length_length)
label_idx = self._read_int(
text_subheader_label,
const.column_label_text_subheader_index_length)
label_idx = min(label_idx, len(self.column_names_strings) - 1)
label_start = self._read_int(
col_label_offset, const.column_label_offset_length)
label_len = self._read_int(col_label_len,
const.column_label_length_length)
label_names = self.column_names_strings[label_idx]
column_label = label_names[label_start: label_start + label_len]
format_names = self.column_names_strings[format_idx]
column_format = format_names[format_start: format_start + format_len]
current_column_number = len(self.columns)
col = _column()
col.col_id = current_column_number
col.name = self.column_names[current_column_number]
col.label = column_label
col.format = column_format
col.ctype = self.column_types[current_column_number]
col.length = self._column_data_lengths[current_column_number]
self.column_formats.append(column_format)
self.columns.append(col)
def read(self, nrows=None):
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
elif nrows is None:
nrows = self.row_count
if len(self.column_types) == 0:
self.close()
raise EmptyDataError("No columns to parse from file")
if self._current_row_in_file_index >= self.row_count:
return None
m = self.row_count - self._current_row_in_file_index
if nrows > m:
nrows = m
nd = (self.column_types == b'd').sum()
ns = (self.column_types == b's').sum()
self._string_chunk = np.empty((ns, nrows), dtype=np.object)
self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8)
self._current_row_in_chunk_index = 0
p = Parser(self)
p.read(nrows)
rslt = self._chunk_to_dataframe()
if self.index is not None:
rslt = rslt.set_index(self.index)
return rslt
def _read_next_page(self):
self._current_page_data_subheader_pointers = []
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
return True
elif len(self._cached_page) != self._page_length:
self.close()
msg = ("failed to read complete page from file "
"(read {:d} of {:d} bytes)")
raise ValueError(msg.format(len(self._cached_page),
self._page_length))
self._read_page_header()
if self._current_page_type == const.page_meta_type:
self._process_page_metadata()
pt = [const.page_meta_type, const.page_data_type]
pt += const.page_mix_types
if self._current_page_type not in pt:
return self._read_next_page()
return False
def _chunk_to_dataframe(self):
n = self._current_row_in_chunk_index
m = self._current_row_in_file_index
ix = range(m - n, m)
rslt = pd.DataFrame(index=ix)
js, jb = 0, 0
for j in range(self.column_count):
name = self.column_names[j]
if self.column_types[j] == b'd':
rslt[name] = self._byte_chunk[jb, :].view(
dtype=self.byte_order + 'd')
rslt[name] = np.asarray(rslt[name], dtype=np.float64)
if self.convert_dates:
unit = None
if self.column_formats[j] in const.sas_date_formats:
unit = 'd'
elif self.column_formats[j] in const.sas_datetime_formats:
unit = 's'
if unit:
rslt[name] = pd.to_datetime(rslt[name], unit=unit,
origin="1960-01-01")
jb += 1
elif self.column_types[j] == b's':
rslt[name] = self._string_chunk[js, :]
if self.convert_text and (self.encoding is not None):
rslt[name] = rslt[name].str.decode(
self.encoding or self.default_encoding)
if self.blank_missing:
ii = rslt[name].str.len() == 0
rslt.loc[ii, name] = np.nan
js += 1
else:
self.close()
raise ValueError("unknown column type %s" %
self.column_types[j])
return rslt
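# Minimal usage sketch; the file name below is hypothetical, and the same
# reader is normally reached through the public pandas.read_sas() entry point.
if __name__ == "__main__":
    rdr = SAS7BDATReader("example.sas7bdat", convert_dates=True)
    df = rdr.read()     # read all rows into a DataFrame
    rdr.close()
    print(df.head())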
|
bsd-3-clause
|
alan-unravel/bokeh
|
bokeh/compat/bokeh_exporter.py
|
38
|
1508
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from matplotlib.collections import LineCollection, PolyCollection
from .mplexporter.exporter import Exporter
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehExporter(Exporter):
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
if isinstance(collection, LineCollection):
self.renderer.make_line_collection(collection)
elif isinstance(collection, PolyCollection):
self.renderer.make_poly_collection(collection)
else:
super(BokehExporter, self).draw_collection(ax, collection, force_pathtrans, force_offsettrans)
def draw_patch(self, ax, patch, force_trans=None):
super(BokehExporter, self).draw_patch(ax, patch, force_trans)
|
bsd-3-clause
|
Cryptoverse/cryptoverse-probe
|
main.py
|
1
|
58080
|
from json import dumps as json_dump
from os import getenv, environ
from sys import stdout, platform
from traceback import print_exc as print_exception
from datetime import datetime
from time import sleep
from ete3 import Tree
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from getch import getch
from probe_exceptions import CommandException, ProbeTimeoutException
import requests
import database
import util
import validate
import parameter_util as putil
import matplotlib
matplotlib.use('TkAgg')
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import
import matplotlib.colors as pycolors
import matplotlib.pyplot as pyplot
AUTO_REBUILD = int(getenv('AUTO_REBUILD', '0')) == 1
# HOST_URL = getenv('HOST_URL', 'http://localhost:5000')
HOST_URL = getenv('HOST_URL', 'http://api.cryptoverse.io')
RULES_URL = HOST_URL + '/rules'
CHAINS_URL = HOST_URL + '/chains'
STAR_LOGS_URL = HOST_URL + '/star-logs'
EVENTS_URL = HOST_URL + '/events'
DEFAULT_COLOR = '\033[0m'
SUCCESS_COLOR = '\033[92m'
ERROR_COLOR = '\033[91m'
BOLD_COLOR = '\033[1m'
CURSOR_ERASE_SEQUENCE = '\033[K'
CURSOR_FORWARD_SEQUENCE = '\033[%sC'
def get_genesis():
return {
'nonce': 0,
'height': 0,
'hash': util.EMPTY_TARGET,
'difficulty': util.difficultyStart(),
'events': [],
'version': 0,
'time': 0,
'previous_hash': util.EMPTY_TARGET,
'events_hash': None,
'meta': None,
'meta_hash': None
}
def get_event_signature(fleet_hash=None, fleet_key=None, event_hash=None, inputs=None, outputs=None, signature=None, event_type=None):
return {
'fleet_hash': fleet_hash,
'fleet_key': fleet_key,
'hash': event_hash,
'inputs': [] if inputs is None else inputs,
'outputs': [] if outputs is None else outputs,
'signature': signature,
'type': event_type
}
def get_event_input(index, key):
return {
'index': index,
'key': key
}
def get_event_output(index, count, fleet_hash, key, star_system, type_name):
return {
'index': index,
'count': count,
'fleet_hash': fleet_hash,
'key': key,
'star_system': star_system,
'type': type_name
}
def generate_account(name='default'):
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
private_serialized = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption()
)
public_serialized = private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
public_lines = public_serialized.splitlines()
public_shrunk = ''
for line in range(1, len(public_lines) - 1):
public_shrunk += public_lines[line].strip('\n')
return {
'name': name,
'private_key': private_serialized,
'public_key': public_shrunk
}
def pretty_json(serialized):
return json_dump(serialized, sort_keys=True, indent=4, separators=(',', ': '))
def get_request(url, payload=None):
try:
return requests.get(url, payload).json()
except:
print_exception()
print 'error on get request '+url
def post_request(url, payload=None):
try:
serialized = json_dump(payload)
return requests.post(url, data=serialized, headers={'content-type': 'application/json', 'cache-control': 'no-cache', }).json()
except:
print_exception()
print 'error on post request '+url
def create_command(function, description, details=None):
return {
'function': function,
'description': description,
'details': details
}
def command_help(commands, params=None):
help_message = '%sThis help message'
exit_message = '%sEnds this process'
if params:
if 0 < len(params):
queried_command_name = params[0]
selection = commands.get(queried_command_name, None)
if selection:
print '%s' % selection['description']
details = selection['details']
if details:
for detail in selection['details']:
print '\t - %s' % detail
return
elif queried_command_name == 'help':
print help_message % ''
return
elif queried_command_name == 'exit':
print exit_message % ''
return
raise CommandException('Command "%s" is not recognized, try typing "help" for a list of all commands' % queried_command_name)
print help_message % 'help\t - '
for command in commands:
print '%s\t - %s' % (command, commands[command]['description'])
print exit_message % 'exit\t - '
def account(params=None):
if putil.retrieve(params, '-a', True, False):
account_all()
elif putil.retrieve(params, '-s', True, False):
account_set(putil.retrieve_value(params, '-s', None))
elif putil.retrieve(params, '-c', True, False):
account_create(putil.retrieve_value(params, '-c', None))
else:
result = database.get_account()
if result:
print 'Using account "%s"' % result['name']
print '\tFleet Hash: %s' % util.get_fleet_hash_name(result['public_key'])
else:
print 'No active account'
def account_all():
message = 'No account information found'
accounts = database.get_accounts()
if accounts:
message = 'Persistent data contains the following account entries'
entry_message = '\n%s\t- %s\n\t\tFleet Hash: %s'
for currentAccount in accounts:
active_flag = '[CURR] ' if currentAccount['active'] else ''
message += entry_message % (active_flag, currentAccount['name'], util.get_fleet_hash_name(currentAccount['public_key']))
print message
def account_set(name):
if not name:
raise CommandException('Account cannot be set to None')
if not database.any_account(name):
raise CommandException('Unable to find account %s' % name)
database.set_account_active(name)
print 'Current account is now "%s"' % name
def account_create(name):
if not name:
raise CommandException('Include a unique name for this account')
elif database.any_account(name):
raise CommandException('An account named "%s" already exists' % name)
created_account = generate_account(name)
database.add_account(created_account)
database.set_account_active(name)
print 'Created and activated account "%s"' % name
def info():
print 'Connected to %s with fudge %s, interval %s, duration %s' % (HOST_URL, util.difficultyFudge(), util.difficultyInterval(), util.difficultyDuration())
def star_log(params=None):
target_hash = None
if putil.has_any(params):
if putil.has_single(params):
target_hash = putil.single_str(params)
# TODO: Actually support target_hash.
print pretty_json(get_request(STAR_LOGS_URL))
def probe(params=None):
# TODO: Sync first...
from_genesis = putil.retrieve(params, '-g', True, False)
post = putil.retrieve(params, '-a', False, True)
verbose = putil.retrieve(params, '-v', True, False)
silent = putil.retrieve(params, '-s', True, False)
allow_duplicate_events = putil.retrieve(params, '-d', True, False)
from_query = putil.retrieve_value(params, '-f', None)
loop = putil.retrieve(params, '-l', True, False)
wait = float(putil.retrieve_value(params, '-w', 0.0))
blind = putil.retrieve(params, '-b', True, False)
if wait < 0:
raise CommandException('Cannot use a wait less than zero seconds')
from_hash = None
if from_query is not None:
from_hash = putil.natural_match(from_query, database.get_star_log_hashes())
if from_hash is None:
raise CommandException('Unable to find a system hash containing %s' % from_query)
if not blind:
sync('-s')
generated = None
started = datetime.now()
while generated is None:
try:
generated = generate_next_star_log(from_hash, from_genesis, allow_duplicate_events, started)
except ProbeTimeoutException:
if not blind:
sync('-s')
if not silent:
print 'Probed new starlog %s' % util.get_system_name(generated['hash'])
if verbose:
print pretty_json(generated)
if not post:
return
try:
result = post_request(STAR_LOGS_URL, generated)
if result == 200:
database.add_star_log(generated)
if not silent:
prefix, postfix = SUCCESS_COLOR if result == 200 else ERROR_COLOR, DEFAULT_COLOR
print 'Posted starlog with response %s%s%s' % (prefix, result, postfix)
except:
print_exception()
print 'Something went wrong when trying to post the generated starlog'
if loop:
if 0 < wait:
sleep(wait)
probe(params)
def generate_next_star_log(from_star_log=None, from_genesis=False, allow_duplicate_events=False, start_time=None, timeout=180):
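# Build a candidate starlog on top of the requested block (or the local highest,
# or a fresh genesis): gather non-conflicting unused events from the server,
# append a signed reward event for the active account, recompute the difficulty
# at interval boundaries, then search for a nonce that satisfies it.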
next_star_log = get_genesis()
if from_star_log:
next_star_log = database.get_star_log(from_star_log)
elif not from_genesis:
local_highest = database.get_star_log_highest()
if local_highest is not None:
next_star_log = local_highest
is_genesis = util.is_genesis_star_log(next_star_log['hash'])
account_info = database.get_account()
next_star_log['events'] = []
if not is_genesis:
event_results = get_request(EVENTS_URL, {'limit': util.eventsMaxLimit()})
if event_results:
unused_events = []
for unused_event in database.get_unused_events(from_star_log=next_star_log['hash']):
unused_events.append(unused_event['key'])
used_inputs = []
used_outputs = []
events = []
for event in event_results:
validate.event(event, require_index=False, require_star_system=True, reward_allowed=False)
conflict = False
current_used_inputs = []
for current_input in event['inputs']:
conflict = current_input['key'] in used_inputs + current_used_inputs or current_input['key'] not in unused_events
if conflict:
break
current_used_inputs.append(current_input['key'])
if conflict:
continue
current_used_outputs = []
for current_output in event['outputs']:
output_key = current_output['key']
conflict = output_key in used_inputs + used_outputs + current_used_inputs + current_used_outputs
if conflict:
break
current_used_outputs.append(output_key)
if conflict:
continue
if not allow_duplicate_events:
if database.any_events_used(current_used_inputs, next_star_log['hash']) or database.any_events_exist(current_used_outputs, next_star_log['hash']):
continue
used_inputs += current_used_inputs
used_outputs += current_used_outputs
event['index'] = len(events)
events.append(event)
next_star_log['events'] += events
reward_output = {
'index': 0,
'type': 'reward',
'fleet_hash': util.sha256(account_info['public_key']),
'key': util.get_unique_key(),
'star_system': None,
'count': util.shipReward(),
}
reward_event = {
'index': len(next_star_log['events']),
'hash': None,
'type': 'reward',
'fleet_hash': util.sha256(account_info['public_key']),
'fleet_key': account_info['public_key'],
'inputs': [],
'outputs': [
reward_output
],
'signature': None
}
if not is_genesis:
# TODO: This won't work correctly if there are multiple genesis blocks!
# TODO: Change this to get from the local database
first_star_log = get_request(CHAINS_URL, {'height': 0})
# Until we have a way to select where to send your reward ships, just send them to the genesis block.
reward_output['star_system'] = first_star_log[0]['hash']
reward_event['hash'] = util.hash_event(reward_event)
reward_event['signature'] = util.rsa_sign(account_info['private_key'], reward_event['hash'])
meta = database.get_meta_content()
next_star_log['meta'] = '' if meta is None else meta
next_star_log['meta_hash'] = util.sha256(next_star_log['meta'])
next_star_log['events'].append(reward_event)
next_star_log['previous_hash'] = next_star_log['hash']
next_star_log['time'] = util.get_time()
next_star_log['nonce'] = 0
next_star_log['events_hash'] = util.hash_events(next_star_log['events'])
next_star_log['log_header'] = util.concat_star_log_header(next_star_log)
next_star_log['height'] = 0 if is_genesis else next_star_log['height'] + 1
if not is_genesis and util.is_difficulty_changing(next_star_log['height']):
# We have to recalculate the difficulty at this height.
previous_recalculation = database.get_star_log_at_height(next_star_log['previous_hash'], next_star_log['height'] - util.difficultyInterval())
previous_star_log = database.get_star_log(next_star_log['previous_hash'])
next_star_log['difficulty'] = util.calculate_difficulty(previous_recalculation['difficulty'], previous_star_log['time'] - previous_recalculation['time'])
found = False
tries = 0
check_interval = 10000000
next_check = check_interval
curr_started = datetime.now()
started = curr_started if start_time is None else start_time
last_checkin = curr_started
# This initial hash handles the hashing of events and such.
next_star_log = util.hash_star_log(next_star_log)
current_difficulty = util.unpack_bits(next_star_log['difficulty'], True)
current_difficulty_leading_zeros = len(current_difficulty) - len(current_difficulty.lstrip('0'))
current_nonce = 0
log_prefix = util.concat_star_log_header(next_star_log, False)
current_hash = None
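# Proof-of-work style nonce search: hash the header prefix plus the current
# nonce until the digest meets the unpacked difficulty (enough leading zeros).
# Every check_interval tries the elapsed time is checked against the timeout,
# and when the nonce wraps at util.MAXIMUM_NONCE the timestamp is refreshed and
# the header prefix rebuilt.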
while not found:
current_hash = util.sha256('%s%s' % (log_prefix, current_nonce))
try:
validate.difficulty_unpacked(current_difficulty, current_difficulty_leading_zeros, current_hash, False)
found = True
break
except:
pass
if tries == next_check:
next_check = tries + check_interval
now = datetime.now()
if timeout < (now - curr_started).total_seconds():
raise ProbeTimeoutException('Probing timed out')
hashes_per_second = tries / (now - last_checkin).total_seconds()
elapsed_minutes = (now - started).total_seconds() / 60
print '\tProbing at %.0f hashes per second, %.1f minutes elapsed...' % (hashes_per_second, elapsed_minutes)
current_nonce += 1
if util.MAXIMUM_NONCE <= current_nonce:
current_nonce = 0
next_star_log['time'] = util.get_time()
log_prefix = util.concat_star_log_header(next_star_log, False)
tries += 1
if found:
next_star_log['nonce'] = current_nonce
next_star_log['log_header'] = util.concat_star_log_header(next_star_log)
next_star_log['hash'] = current_hash
else:
raise CommandException('Unable to probe a new starlog')
return next_star_log
def sync(params=None):
silent = putil.retrieve(params, '-s', True, False)
if putil.retrieve(params, '-f', True, False):
if not silent:
print 'Removing all locally cached starlogs'
database.initialize(True)
latest = database.get_star_log_latest()
latest_time = 0 if latest is None else latest['time']
all_results = []
last_count = util.starLogsMaxLimit()
offset = 0
while util.starLogsMaxLimit() == last_count:
results = get_request(STAR_LOGS_URL, {'since_time': latest_time, 'limit': util.starLogsMaxLimit(), 'offset': offset})
if results is None:
last_count = 0
else:
last_count = len(results)
offset += last_count
all_results += results
for result in all_results:
database.add_star_log(result)
if not silent:
print 'Synchronized %s starlogs' % len(all_results)
def render_chain(params=None):
# TODO: Fix bug that causes rendering to mess up after probing.
limit = 6
height = None
# TODO: Actually get height from parameters.
if putil.has_any(params):
if putil.has_single(params):
limit = putil.single_int(params)
else:
raise CommandException('Unsupported parameters')
highest = database.get_star_log_highest()
if highest is None:
raise CommandException('No starlogs to render, try "sync"')
height = highest['height'] if height is None else height
results = database.get_star_logs_at_height(height, limit)
strata = [(height, list(results))]
remaining = limit - len(results)
while 0 < height and remaining != 0:
height -= 1
ancestor_results = database.get_star_logs_at_height(height, remaining)
current_results = []
for ancestor in ancestor_results:
has_children = False
for result in results:
has_children = result['previous_hash'] == ancestor['hash']
if has_children:
break
results.append(ancestor)
if not has_children:
current_results.append(ancestor)
if current_results:
strata.append((height, current_results))
remaining = limit - len(current_results)
tree = Tree()
last_node = tree
count = len(strata)
for i in reversed(range(0, count)):
stratum = strata[i]
if i == 0:
for orphan in stratum[1]:
last_node.add_child(name=util.get_system_name(orphan['hash']))
else:
last_node = last_node.add_child()
for orphan in stratum[1]:
last_node.add_sister(name=util.get_system_name(orphan['hash']))
print tree
def render_systems(params=None):
figure = pyplot.figure()
axes = figure.add_subplot(111, projection='3d')
for currentSystem in database.get_star_log_hashes(from_highest=True):
current_position = util.get_cartesian(currentSystem)
xs = [current_position[0], current_position[0]]
ys = [current_position[1], current_position[1]]
zs = [0, current_position[2]]
axes.plot(xs, ys, zs)
axes.scatter(current_position[0], current_position[1], current_position[2], label=util.get_system_name(currentSystem))
axes.legend()
axes.set_title('Systems')
axes.set_xlabel('X')
axes.set_ylabel('Y')
axes.set_zlabel('Z')
pyplot.show()
def list_deployments(params=None):
verbose = putil.retrieve(params, '-v', True, False)
list_all = not putil.has_any(params) or putil.retrieve(params, '-a', True, False)
from_hash = None
if putil.retrieve(params, '-f', True, False):
from_hash_query = putil.retrieve_value(params, '-f', None)
if from_hash_query is None:
raise CommandException('A system hash fragment must be passed with the -f parameter')
from_hash = putil.natural_match(from_hash_query, database.get_star_log_hashes())
if from_hash is None:
raise CommandException('Unable to find a system hash containing %s' % from_hash_query)
if list_all:
list_all_deployments(from_hash, verbose)
return
hash_query = putil.single_str(params)
selected_hash = putil.natural_match(hash_query, database.get_star_log_hashes())
if selected_hash is None:
raise CommandException('Unable to find a system hash containing %s' % hash_query)
deployments = database.get_unused_events(from_star_log=from_hash, system_hash=selected_hash)
if verbose:
print pretty_json(deployments)
return
fleets = {}
for deployment in deployments:
fleet = deployment['fleet_hash']
count = deployment['count']
if fleet in fleets:
fleets[fleet] += count
else:
fleets[fleet] = count
result = 'No deployments in system %s' % util.get_system_name(selected_hash)
if fleets:
result = 'Deployments in star system %s' % util.get_system_name(selected_hash)
fleet_keys = fleets.keys()
for i in range(0, len(fleets)):
current_fleet = fleet_keys[i]
result += '\n - %s : %s' % (util.get_fleet_name(current_fleet), fleets[current_fleet])
print result
def list_all_deployments(from_star_log, verbose):
deployments = database.get_unused_events(from_star_log=from_star_log)
if verbose:
print pretty_json(deployments)
return
systems = {}
for deployment in deployments:
system = deployment['star_system']
fleet = deployment['fleet_hash']
count = deployment['count']
if system in systems:
current_system = systems[system]
else:
current_system = {}
systems[system] = current_system
if fleet in current_system:
current_system[fleet] += count
else:
current_system[fleet] = count
result = 'No deployments in any systems'
account_hash = util.sha256(database.get_account()['public_key'])
if systems:
result = 'Deployments in all systems'
system_keys = systems.keys()
for i in range(0, len(system_keys)):
current_system = system_keys[i]
result += '\n - %s' % util.get_system_name(current_system)
fleet_keys = systems[current_system].keys()
for fleet_key in range(0, len(fleet_keys)):
current_fleet = fleet_keys[fleet_key]
fleet_count = systems[current_system][current_fleet]
active_flag = '[CURR] ' if current_fleet == account_hash else ''
result += '\n%s\t - %s : %s' % (active_flag, util.get_fleet_name(current_fleet), fleet_count)
print result
def attack(params=None):
if not putil.has_at_least(params, 2):
raise CommandException('An origin system and fleet must be specified')
verbose = putil.retrieve(params, '-v', True, False)
abort = putil.retrieve(params, '-a', True, False)
origin_fragment = params[0]
enemy_fragment = params[1]
origin_hash = putil.natural_match(origin_fragment, database.get_star_log_hashes())
if origin_hash is None:
raise CommandException('Unable to find an origin system containing %s' % origin_fragment)
highest_hash = database.get_star_log_highest(origin_hash)['hash']
enemy_hash = putil.natural_match(enemy_fragment, database.get_fleets(highest_hash))
if enemy_hash is None:
raise CommandException('Unable to find a fleet containing %s' % enemy_fragment)
enemy_deployments = database.get_unused_events(highest_hash, origin_hash, enemy_hash)
if enemy_deployments is None:
raise CommandException('Fleet %s has no ships deployed in %s' % (util.get_fleet_name(enemy_hash), util.get_system_name(origin_hash)))
account_info = database.get_account()
friendly_hash = util.sha256(account_info['public_key'])
friendly_deployments = database.get_unused_events(highest_hash, origin_hash, friendly_hash)
friendly_count = 0
for friendly_deployment in friendly_deployments:
friendly_count += friendly_deployment['count']
if friendly_count == 0:
raise CommandException('None of your fleet is deployed to %s' % util.get_system_name(origin_hash))
# TODO: Break this out into its own get call.
attack_event = {
'fleet_hash': friendly_hash,
'fleet_key': account_info['public_key'],
'hash': None,
'inputs': [],
'outputs': [],
'signature': None,
'type': 'attack'
}
input_index = 0
enemy_count = 0
for enemy_deployment in enemy_deployments:
attack_event['inputs'].append(get_event_input(input_index, enemy_deployment['key']))
enemy_count += enemy_deployment['count']
input_index += 1
if friendly_count <= enemy_count:
break
friendly_count = 0
for friendly_deployment in friendly_deployments:
attack_event['inputs'].append(get_event_input(input_index, friendly_deployment['key']))
friendly_count += friendly_deployment['count']
input_index += 1
if enemy_count <= friendly_count:
break
if enemy_count < friendly_count:
attack_event['outputs'].append(get_event_output(0, friendly_count - enemy_count, friendly_hash, util.get_unique_key(), origin_hash, 'attack'))
elif friendly_count < enemy_count:
attack_event['outputs'].append(get_event_output(0, enemy_count - friendly_count, enemy_hash, util.get_unique_key(), origin_hash, 'attack'))
attack_event['hash'] = util.hash_event(attack_event)
attack_event['signature'] = util.rsa_sign(account_info['private_key'], attack_event['hash'])
if verbose:
print pretty_json(attack_event)
if not abort:
result = post_request(EVENTS_URL, attack_event)
prefix, postfix = SUCCESS_COLOR if result == 200 else ERROR_COLOR, DEFAULT_COLOR
print 'Posted attack event with response %s%s%s' % (prefix, result, postfix)
def jump(params=None):
verbose = putil.retrieve(params, '-v', True, False)
render = putil.retrieve(params, '-r', True, False)
abort = putil.retrieve(params, '-a', True, False)
# lossy = putil.retrieve(params, '-l', True, False)
# TODO: Add actual support for non-lossy jumps.
lossy = True
count = None
if not putil.has_any(params):
raise CommandException('Specify an origin and destination system')
if len(params) < 2:
raise CommandException('An origin and destination system must be specified')
origin_fragment = params[0]
destination_fragment = params[1]
if 2 < len(params) and isinstance(params[2], int):
count = int(params[2])
hashes = database.get_star_log_hashes()
origin_hash = putil.natural_match(origin_fragment, hashes)
if origin_hash is None:
raise CommandException('Unable to find an origin system containing %s' % origin_fragment)
destination_hash = putil.natural_match(destination_fragment, hashes)
if destination_hash is None:
raise CommandException('Unable to find a destination system containing %s' % destination_fragment)
if not database.get_star_logs_share_chain([origin_hash, destination_hash]):
raise CommandException('Systems %s and %s exist on different chains' % (util.get_system_name(origin_hash), util.get_system_name(destination_hash)))
highest_hash = database.get_star_log_highest(database.get_star_log_highest_from_list([origin_hash, destination_hash]))['hash']
account_info = database.get_account()
fleet_hash = util.sha256(account_info['public_key'])
deployments = database.get_unused_events(from_star_log=highest_hash, system_hash=origin_hash, fleet_hash=fleet_hash)
total_ships = 0
for deployment in deployments:
total_ships += deployment['count']
if count is None:
count = total_ships
lossy = True
elif total_ships < count:
raise CommandException('Not enough ships to jump from the origin system')
if count <= 0:
raise CommandException('A number of ships greater than zero must be specified for a jump')
# TODO: Insert support for non-lossy jumps here.
jump_cost = util.get_jump_cost(destination_hash, origin_hash, count)
if jump_cost == count:
raise CommandException('Unable to complete a jump where all ships would be lost')
jump_event = get_event_signature(fleet_hash, account_info['public_key'], event_type='jump')
inputs = []
input_index = 0
total_input_count = 0
for deployment in deployments:
total_input_count += deployment['count']
inputs.append(get_event_input(input_index, deployment['key']))
input_index += 1
if count <= total_input_count:
break
extra_ships = total_input_count - count
outputs = []
index = 0
jump_key = util.sha256('%s%s%s%s' % (util.get_time(), fleet_hash, origin_hash, destination_hash))
if 0 < extra_ships:
outputs.append(get_event_output(index, extra_ships, fleet_hash, util.get_unique_key(), origin_hash, 'jump'))
index += 1
outputs.append(get_event_output(index, count - jump_cost, fleet_hash, jump_key, destination_hash, 'jump'))
jump_event['inputs'] = inputs
jump_event['outputs'] = outputs
jump_event['hash'] = util.hash_event(jump_event)
jump_event['signature'] = util.rsa_sign(account_info['private_key'], jump_event['hash'])
if verbose:
print pretty_json(jump_event)
if render:
render_jump(origin_hash, destination_hash)
if not abort:
result = post_request(EVENTS_URL, jump_event)
prefix, postfix = SUCCESS_COLOR if result == 200 else ERROR_COLOR, DEFAULT_COLOR
print 'Posted jump event with response %s%s%s' % (prefix, result, postfix)
def render_jump(origin_hash, destination_hash):
highest = database.get_star_log_highest_from_list([origin_hash, destination_hash])
figure = pyplot.figure()
axes = figure.add_subplot(111, projection='3d')
for current_system in database.get_star_log_hashes(highest):
current_position = util.get_cartesian(current_system)
xs = [current_position[0], current_position[0]]
ys = [current_position[1], current_position[1]]
zs = [0, current_position[2]]
axes.plot(xs, ys, zs)
axes.scatter(current_position[0], current_position[1], current_position[2], label=util.get_system_name(current_system))
origin_position = util.get_cartesian(origin_hash)
destination_position = util.get_cartesian(destination_hash)
xs = [origin_position[0], destination_position[0]]
ys = [origin_position[1], destination_position[1]]
zs = [origin_position[2], destination_position[2]]
axes.plot(xs, ys, zs, linestyle=':')
axes.legend()
axes.set_title('Jump %s -> %s' % (util.get_system_name(origin_hash), util.get_system_name(destination_hash)))
axes.set_xlabel('X')
axes.set_ylabel('Y')
axes.set_zlabel('Z')
pyplot.show()
def render_jump_range(params=None):
if not putil.has_any(params):
raise CommandException('Specify an origin system to render the jump range from')
origin_fragment = putil.single_str(params)
destination_fragment = putil.retrieve_value(params, '-d', None)
hashes = database.get_star_log_hashes()
origin_hash = putil.natural_match(origin_fragment, hashes)
if origin_hash is None:
raise CommandException('Unable to find an origin system containing %s' % origin_fragment)
destination_hash = None
highest = None
if destination_fragment is not None:
destination_hash = putil.natural_match(destination_fragment, hashes)
if destination_hash is None:
raise CommandException('Unable to find a destination system containing %s' % destination_fragment)
if not database.get_star_logs_share_chain([origin_hash, destination_hash]):
raise CommandException('Systems %s and %s exist on different chains' % (util.get_system_name(origin_hash), util.get_system_name(destination_hash)))
highest = database.get_star_log_highest(database.get_star_log_highest_from_list([origin_hash, destination_hash]))['hash']
figure = pyplot.figure()
axes = figure.add_subplot(111, projection='3d')
hue_start = 0.327
hue_end = 0.0
hue_delta = hue_end - hue_start
for current_system in database.get_star_log_hashes(highest):
cost = util.get_jump_cost(origin_hash, current_system)
cost_hue = hue_start + (cost * hue_delta)
cost_value = 0.0 if cost == 1.0 else 1.0
color = pycolors.hsv_to_rgb([cost_hue, 0.7, cost_value])
current_position = util.get_cartesian(current_system)
xs = [current_position[0], current_position[0]]
ys = [current_position[1], current_position[1]]
zs = [0, current_position[2]]
axes.plot(xs, ys, zs, c=color)
marker = '^' if current_system == origin_hash else 'o'
axes.scatter(current_position[0], current_position[1], current_position[2], label=util.get_system_name(current_system), c=color, marker=marker)
if destination_hash is not None:
origin_position = util.get_cartesian(origin_hash)
destination_position = util.get_cartesian(destination_hash)
xs = [origin_position[0], destination_position[0]]
ys = [origin_position[1], destination_position[1]]
zs = [origin_position[2], destination_position[2]]
axes.plot(xs, ys, zs, linestyle=':')
axes.legend()
axes.set_title('Jump Range %s' % util.get_system_name(origin_hash))
axes.set_xlabel('X')
axes.set_ylabel('Y')
axes.set_zlabel('Z')
pyplot.show()
def system_position(params=None):
if not putil.has_single(params):
raise CommandException('An origin system must be specified')
origin_fragment = putil.single_str(params)
origin_hash = putil.natural_match(origin_fragment, database.get_star_log_hashes())
if origin_hash is None:
raise CommandException('Unable to find an origin system containing %s' % origin_fragment)
print '%s system is at %s' % (util.get_system_name(origin_hash), util.get_cartesian(origin_hash))
def system_distance(params=None):
if not putil.has_count(params, 2):
raise CommandException('An origin and destination system must be specified')
origin_fragment = params[0]
destination_fragment = params[1]
hashes = database.get_star_log_hashes()
origin_hash = putil.natural_match(origin_fragment, hashes)
if origin_hash is None:
raise CommandException('Unable to find an origin system containing %s' % origin_fragment)
destination_hash = putil.natural_match(destination_fragment, hashes)
if destination_hash is None:
raise CommandException('Unable to find a destination system containing %s' % destination_fragment)
if not database.get_star_logs_share_chain([origin_hash, destination_hash]):
raise CommandException('Systems %s and %s exist on different chains' % (util.get_system_name(origin_hash), util.get_system_name(destination_hash)))
print 'Distance between %s and %s is %s' % (util.get_system_name(origin_hash), util.get_system_name(destination_hash), util.get_distance(origin_hash, destination_hash))
def system_average_distances(params=None):
origin_hash = None
if putil.has_single(params):
origin_fragment = params[0]
origin_hash = putil.natural_match(origin_fragment, database.get_star_log_hashes())
if origin_hash is None:
raise CommandException('Unable to find an origin system containing %s' % origin_fragment)
total = 0
count = 0
if origin_hash:
for currentHash in database.get_star_log_hashes(origin_hash):
if currentHash == origin_hash:
continue
total += util.get_distance(currentHash, origin_hash)
count += 1
else:
hashes = database.get_star_log_hashes(from_highest=True)
for currentHash in hashes:
hashes = hashes[1:]
for targetHash in hashes:
total += util.get_distance(currentHash, targetHash)
count += 1
if count == 0:
print 'No systems to get the average distances of'
else:
average = total / count
if origin_hash is None:
print 'Average distance between all systems is %s' % average
else:
print 'Average distance to system %s is %s' % (util.get_system_name(origin_hash), average)
def system_maximum_distance(params=None):
system_min_max_distance(params)
def system_minimum_distance(params=None):
system_min_max_distance(params, False)
def system_min_max_distance(params=None, calculating_max=True):
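# Reports either the farthest or the nearest system: from a single specified
# origin when one is given, otherwise across every pair of systems on the
# highest known chain.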
modifier = 'Farthest' if calculating_max else 'Nearest'
origin_hash = None
if putil.has_single(params):
origin_fragment = params[0]
origin_hash = putil.natural_match(origin_fragment, database.get_star_log_hashes())
if origin_hash is None:
raise CommandException('Unable to find an origin system containing %s' % origin_fragment)
if origin_hash:
best_system = None
best_distance = 0 if calculating_max else 999999999
for current_hash in database.get_star_log_hashes(origin_hash):
if current_hash == origin_hash:
continue
dist = util.get_distance(origin_hash, current_hash)
if (calculating_max and best_distance < dist) or (not calculating_max and dist < best_distance):
best_system = current_hash
best_distance = dist
print '%s system from %s is %s, with a distance of %s' % (modifier, util.get_system_name(origin_hash), util.get_system_name(best_system), best_distance)
else:
hashes = database.get_star_log_hashes(from_highest=True)
best_system_origin = None
best_system_destination = None
best_distance = 0 if calculating_max else 999999999
for current_hash in hashes:
hashes = hashes[1:]
for targetHash in hashes:
dist = util.get_distance(current_hash, targetHash)
if (calculating_max and best_distance < dist) or (not calculating_max and dist < best_distance):
best_system_origin = current_hash
best_system_destination = targetHash
best_distance = dist
print '%s systems are %s and %s, with a distance of %s' % (modifier, util.get_system_name(best_system_origin), util.get_system_name(best_system_destination), best_distance)
def transfer(params=None):
verbose = putil.retrieve(params, '-v', True, False)
abort = putil.retrieve(params, '-a', True, False)
if not putil.has_any(params):
raise CommandException('Another fleet hash must be specified')
to_fleet = putil.single_str(params)
try:
validate.field_is_sha256(to_fleet)
except:
raise CommandException('A complete fleet hash must be specified')
count = None
if putil.has_at_least(params, 2):
count = int(params[1])
if count <= 0:
raise CommandException('A valid number of ships must be specified')
account_info = database.get_account()
from_fleet = util.sha256(account_info['public_key'])
events = database.get_unused_events(fleet_hash=from_fleet)
if events is None:
raise CommandException('No ships are available to be transferred for fleet %s' % util.get_fleet_name(from_fleet))
total_count = 0
for event in events:
total_count += event['count']
if count is None:
count = total_count
elif total_count < count:
raise CommandException('Only %s are available to transfer' % total_count)
transfer_event = get_event_signature(from_fleet, account_info['public_key'], event_type='transfer')
remaining_count = count
overflow_count = 0
input_index = 0
output_index = 0
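# Consume unused deployment events until the requested ship count is covered,
# grouping outputs by destination fleet and star system; any surplus from the
# last consumed input is returned to the sending fleet as a separate output.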
while 0 < remaining_count:
curr_input = events[input_index]
count_delta = curr_input['count']
if remaining_count < count_delta:
count_delta = remaining_count
transfer_event['inputs'].append(get_event_input(input_index, curr_input['key']))
existing_output = [x for x in transfer_event['outputs'] if x['fleet_hash'] == to_fleet and x['star_system'] == curr_input['star_system']]
if existing_output:
# Add to existing output
existing_output = existing_output[0]
existing_output['count'] += count_delta
else:
transfer_event['outputs'].append(get_event_output(output_index, count_delta, to_fleet, util.get_unique_key(), curr_input['star_system'], 'transfer'))
output_index += 1
if count_delta != curr_input['count']:
# Leftover ships that need to be assigned back to the owner.
input_index += 1
transfer_event['outputs'].append(get_event_output(input_index, curr_input['count'] - count_delta, from_fleet, util.get_unique_key(), curr_input['star_system'], 'transfer'))
remaining_count = 0
else:
remaining_count -= curr_input['count']
input_index += 1
transfer_event['hash'] = util.hash_event(transfer_event)
transfer_event['signature'] = util.rsa_sign(account_info['private_key'], transfer_event['hash'])
if verbose:
print pretty_json(transfer_event)
if not abort:
result = post_request(EVENTS_URL, transfer_event)
prefix, postfix = SUCCESS_COLOR if result == 200 else ERROR_COLOR, DEFAULT_COLOR
print 'Posted transfer event with response %s%s%s' % (prefix, result, postfix)
def meta_content(params=None):
if not putil.has_any(params):
current_content = database.get_meta_content()
if current_content is None:
print 'No meta content set, use meta -s <content> to set one'
else:
print 'Meta content is "%s"' % current_content
return
if putil.retrieve(params, '-r', True, False):
database.set_meta_content(None)
print 'Meta content has been reset'
return
new_content = putil.retrieve_value(params, '-s', None)
if putil.retrieve(params, '-s', True, False):
if new_content is None:
raise CommandException('Specify a new meta content, if you want to specify none use the -r flag instead')
else:
raise CommandException('An unrecognized parameter was passed')
database.set_meta_content(new_content)
print 'Meta content set to "%s"' % new_content
def poll_input():
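# Read one keypress at a time via getch() and match the collected bytes against
# platform-specific escape sequences, returning flags for printable characters,
# return, backspace, ctrl-C, the arrow keys, tab, and a double escape.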
if platform.startswith('win'):
return_sequence = [13]
up_sequence = [224, 72]
down_sequence = [224, 80]
left_sequence = [224, 75]
right_sequence = [224, 77]
back_sequence = [8]
control_c_sequence = [3]
tab_sequence = [9]
double_escape_sequence = [27, 27]
else:
return_sequence = [13]
up_sequence = [27, 91, 65]
down_sequence = [27, 91, 66]
left_sequence = [27, 91, 68]
right_sequence = [27, 91, 67]
back_sequence = [127]
control_c_sequence = [3]
tab_sequence = [9]
double_escape_sequence = [27, 27]
special_sequences = [
tab_sequence,
return_sequence,
up_sequence,
down_sequence,
left_sequence,
right_sequence,
back_sequence,
control_c_sequence,
double_escape_sequence
]
alpha_numeric_range = range(32, 127)
chars = []
while True:
is_special = chars in special_sequences
if is_special:
break
char = ord(getch())
chars.append(char)
if len(chars) == 1 and char in alpha_numeric_range:
break
elif 1 < len(chars):
last_chars = chars[-2:]
if last_chars == double_escape_sequence:
chars = last_chars
is_special = True
break
alpha_numeric = ''
is_return = False
is_backspace = False
is_control_c = False
is_up = False
is_down = False
is_left = False
is_right = False
is_tab = False
is_double_escape = False
if is_special:
if chars == return_sequence:
is_return = True
elif chars == back_sequence:
is_backspace = True
elif chars == control_c_sequence:
is_control_c = True
elif chars == up_sequence:
is_up = True
elif chars == down_sequence:
is_down = True
elif chars == left_sequence:
is_left = True
elif chars == right_sequence:
is_right = True
elif chars == tab_sequence:
is_tab = True
elif chars == double_escape_sequence:
is_double_escape = True
else:
print 'Unrecognized special sequence %s' % chars
elif len(chars) == 1:
alpha_numeric = chr(chars[0])
else:
print 'Unrecognized alphanumeric sequence %s' % chars
return alpha_numeric, is_return, is_backspace, is_control_c, is_up, is_down, is_left, is_right, is_tab, is_double_escape
def main():
print 'Starting probe...'
rules = get_request(RULES_URL)
if not rules:
raise ValueError('null rules')
environ['DIFFICULTY_FUDGE'] = str(rules['difficulty_fudge'])
environ['DIFFICULTY_INTERVAL'] = str(rules['difficulty_interval'])
environ['DIFFICULTY_DURATION'] = str(rules['difficulty_duration'])
environ['DIFFICULTY_START'] = str(rules['difficulty_start'])
environ['SHIP_REWARD'] = str(rules['ship_reward'])
environ['CARTESIAN_DIGITS'] = str(rules['cartesian_digits'])
environ['JUMP_COST_MIN'] = str(rules['jump_cost_min'])
environ['JUMP_COST_MAX'] = str(rules['jump_cost_max'])
environ['JUMP_DIST_MAX'] = str(rules['jump_distance_max'])
environ['STARLOGS_MAX_BYTES'] = str(rules['star_logs_max_limit'])
environ['EVENTS_MAX_BYTES'] = str(rules['events_max_limit'])
environ['COMMAND_HISTORY'] = getenv('COMMAND_HISTORY', '100')
print 'Connected to %s\n\t - Fudge: %s\n\t - Interval: %s\n\t - Duration: %s\n\t - Starting Difficulty: %s\n\t - Ship Reward: %s' % (HOST_URL, util.difficultyFudge(), util.difficultyInterval(), util.difficultyDuration(), util.difficultyStart(), util.shipReward())
min_x, min_y, min_z = util.get_cartesian_minimum()
max_x, max_y, max_z = util.get_cartesian_maximum()
universe_size = '( %s, %s, %s ) - ( %s, %s, %s )' % (min_x, min_y, min_z, max_x, max_y, max_z)
print '\t - Universe Size: %s\n\t - Jump Cost: %s%% to %s%%\n\t - Jump Distance Max: %s' % (universe_size, util.jumpCostMinimum() * 100, util.jumpCostMaximum() * 100, util.jumpDistanceMaximum())
if AUTO_REBUILD:
print 'Automatically rebuilding database...'
database.initialize(AUTO_REBUILD)
sync()
if not database.get_accounts():
print 'Unable to find existing accounts, creating default...'
default_account = generate_account()
database.add_account(default_account)
database.set_account_active(default_account['name'])
elif database.get_account() is None:
print 'No active account, try "help account" for more information on selecting an active account'
all_commands = {
'info': create_command(
info,
'Displays information about the connected server'
),
'sync': create_command(
sync,
'Syncs the local cache with updates from the server',
[
'"-f" replaces the local cache with fresh results',
'"-s" silently executes the command'
]
),
'slog': create_command(
star_log,
'Retrieves the latest starlog'
),
'probe': create_command(
probe,
'Probes the starlog in the chain',
[
'Passing no arguments probes for a new starlog on top of the highest chain member',
'"-g" probes for a new genesis starlog',
'"-v" prints the probed starlog to the console',
'"-s" silently executes the command',
'"-a" aborts without posting starlog to the server',
'"-d" allow duplicate events',
'"-f" probes for a starlog ontop of the best matching system',
'"-l" loop and probe again after posting to the server',
'"-w" number of seconds to wait before looping to probe again',
'"-b" blindly probe for new stars without syncing inbetween'
]
),
'meta': create_command(
meta_content,
'Retrieves or sets the meta content included in probed starlogs',
[
'Passing no arguments retrieves the current meta content being included with probed starlogs',
'"-s" sets a new meta content',
'"-r" resets the meta content to nothing'
]
),
'account': create_command(
account,
'Information about the current account',
[
'Passing no arguments gets the current account information',
'"-a" lists all accounts stored in persistent data',
'"-s" followed by an account name changes the current account to the specified one',
'"-c" followed by an account name creates a new account'
]
),
'rchain': create_command(
render_chain,
'Render starlog chain information to the command line',
[
'Passing no arguments renders the highest chains and their siblings',
'Passing an integer greater than zero renders that many chains'
]
),
'rsys': create_command(
render_systems,
'Render systems in an external plotter'
),
'ldeploy': create_command(
list_deployments,
'List deployments in the specified system',
[
'Passing a partial hash will list deployments in the best matching system',
'"-a" lists all systems with deployments',
'"-f" looks for deployments on the chain with the matching head'
]
),
'attack': create_command(
attack,
'Attack fleets in the specified system',
[
'Passing a partial origin and enemy fleet hash will attack the best matching fleet',
'"-v" prints the attack to the console',
'"-a" aborts without posting attack to the server'
]
),
'jump': create_command(
jump,
'Jump ships from one system to another',
[
'Passing partial origin and destination hashes will jump all ships from the origin system',
'Passing partial origin and destination hashes along with a valid number of ships will jump that many from the origin system',
'"-v" prints the jump to the console',
'"-r" renders the jump in an external plotter before executing it',
'"-a" aborts without posting jump to the server'
]
),
'transfer': create_command(
transfer,
'Transfer ships from one fleet to another',
[
'Passing a complete fleet hash will transfer all ships to that fleet',
'Passing a complete fleet hash and a valid number of ships will transfer that number of ships to that fleet',
'"-v" prints the transfer to the console',
'"-a" aborts without posting transfer to the server'
]
),
'jrange': create_command(
render_jump_range,
'Renders the range of jumps in an external plotter',
[
'Passing partial origin hash will render with that system in focus',
'"-d" followed by a destination hash will render a line between the best matching system and the origin'
]
),
'pos': create_command(
system_position,
'Calculates the coordinates of the specified system',
[
'Passing a partial hash will calculate the coordinate of the best matching system'
]
),
'dist': create_command(
system_distance,
'Calculates the distance between the specified systems',
[
'Passing a partial origin and destination hash will calculate the distance between the best matching systems'
]
),
'avgdist': create_command(
system_average_distances,
'Calculates the average distance between all systems',
[
'Passing no arguments will calculate the average distance between every system',
'Passing a partial origin will calculate the average distance to the best matching system'
]
),
'maxdist': create_command(
system_maximum_distance,
'Calculates the maximum distance between all systems',
[
'Passing no arguments will calculate the maximum distance between every system',
'Passing a partial origin will calculate the maximum distance to the best matching system'
]
),
'mindist': create_command(
system_minimum_distance,
'Calculates the minimum distance between all systems',
[
'Passing no arguments will calculate the minimum distance between every system',
'Passing a partial origin will calculate the minimum distance to the best matching system'
]
)
}
command_prefix = '> '
command = None
command_index = 0
command_history = -1
command_in_session = 0
while True:
if command is None:
command = ''
stdout.write('\r%s%s%s' % (command_prefix, command, CURSOR_ERASE_SEQUENCE))
stdout.write('\r%s' % (CURSOR_FORWARD_SEQUENCE % (command_index + len(command_prefix))))
alpha_numeric, is_return, is_backspace, is_control_c, is_up, is_down, is_left, is_right, is_tab, is_double_escape = poll_input()
old_command_index = command_index
old_command = command
if is_backspace:
if 0 < command_index:
if len(command) == command_index:
# We're at the end of the string
command = command[:-1]
else:
# We're in the middle of a string
command = command[:command_index - 1] + command[command_index:]
command_index -= 1
elif is_control_c:
break
elif is_up:
command_history = min(command_history + 1, database.count_commands() - 1)
command = database.get_command(command_history)
command_index = 0 if command is None else len(command)
elif is_down:
command_history = max(command_history - 1, -1)
if command_history < 0:
command = ''
else:
command = database.get_command(command_history)
command_index = 0 if command is None else len(command)
elif is_left:
if 0 < command_index:
command_index -= 1
elif is_right:
if command_index < len(command):
command_index += 1
elif alpha_numeric:
if len(command) == command_index:
command += alpha_numeric
else:
command = command[:command_index] + alpha_numeric + command[command_index:]
command_index += 1
if old_command != command:
stdout.write('\r%s%s%s%s%s' % (command_prefix, BOLD_COLOR, command, DEFAULT_COLOR, CURSOR_ERASE_SEQUENCE))
if old_command_index != command_index:
stdout.write('\r%s' % (CURSOR_FORWARD_SEQUENCE % (command_index + len(command_prefix))))
if is_return or is_double_escape:
stdout.write('\n')
if is_double_escape:
command = None
command_index = 0
command_history = -1
continue
if not is_return:
continue
try:
if not command:
print 'Type help for more commands'
continue
args = command.split(' ')
command_name = args[0]
command_args = args[1:]
selected_command = all_commands.get(command_name, None)
if not selected_command:
if command_name == 'help':
command_help(all_commands, command_args)
elif command_name == 'exit':
break
else:
print 'No command "%s" found, try typing help for more commands' % command
else:
if not command_args:
selected_command['function']()
else:
selected_command['function'](command_args)
except CommandException as exception:
print exception
except:
print_exception()
print 'Error with your last command'
database.add_command(command, util.get_time(), command_in_session)
command = None
command_index = 0
command_history = -1
command_in_session += 1
if __name__ == '__main__':
main()
stdout.write('\nExiting...\n')
|
mit
|
cdegroc/scikit-learn
|
benchmarks/bench_sgd_covertype.py
|
1
|
7493
|
"""
================================
Covertype dataset with dense SGD
================================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
Liblinear 11.8977s 0.0285s 0.2305
GaussianNB 3.5931s 0.6645s 0.3633
SGD 0.2924s 0.0114s 0.2300
CART 39.9829s 0.0345s 0.0476
RandomForest 794.6232s 1.0526s 0.0249
Extra-Trees 1401.7051s 1.1181s 0.0230
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
To run this example use your favorite python shell::
% ipython benchmark/bench_sgd_covertype.py
"""
from __future__ import division
print __doc__
# Author: Peter Prettenhoer <[email protected]>
# License: BSD Style.
# $Id$
from time import time
import os
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn import metrics
######################################################################
## Download the data, if not already on disk
if not os.path.exists('covtype.data.gz'):
# Download the data
import urllib
print "Downloading data, Please Wait (11MB)..."
opener = urllib.urlopen(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/covtype/covtype.data.gz')
open('covtype.data.gz', 'wb').write(opener.read())
######################################################################
## Load dataset
print("Loading dataset...")
import gzip
f = gzip.open('covtype.data.gz')
X = np.fromstring(f.read().replace(",", " "), dtype=np.float64, sep=" ",
count=-1)
X = X.reshape((581012, 55))
f.close()
# class 1 vs. all others.
y = np.ones(X.shape[0]) * -1
y[np.where(X[:, -1] == 1)] = 1
X = X[:, :-1]
######################################################################
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
train_idx = idx[:522911]
test_idx = idx[522911:]
X_train = X[train_idx]
y_train = y[train_idx]
X_test = X[test_idx]
y_test = y[test_idx]
# free memory
del X
del y
######################################################################
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
######################################################################
## Print dataset statistics
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25),
X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25),
np.unique(y_train).shape[0]))
print("%s %d (%d, %d)" % ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == -1)))
print("%s %d (%d, %d)" % ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == -1)))
print("")
print("Training classifiers...")
print("")
######################################################################
## Benchmark classifiers
def benchmark(clf):
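# Fit the classifier on the training split, time training and prediction
# separately, and return (error rate, train time, test time).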
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
err = metrics.zero_one(y_test, pred) / float(pred.shape[0])
return err, train_time, test_time
######################################################################
## Train Liblinear model
liblinear_parameters = {
'loss': 'l2',
'penalty': 'l2',
'C': 1000,
'dual': False,
'tol': 1e-3,
}
liblinear_res = benchmark(LinearSVC(**liblinear_parameters))
liblinear_err, liblinear_train_time, liblinear_test_time = liblinear_res
######################################################################
## Train GaussianNB model
gnb_err, gnb_train_time, gnb_test_time = benchmark(GaussianNB())
######################################################################
## Train SGD model
sgd_parameters = {
'alpha': 0.001,
'n_iter': 2,
}
sgd_err, sgd_train_time, sgd_test_time = benchmark(SGDClassifier(
**sgd_parameters))
######################################################################
## Train CART model
cart_err, cart_train_time, cart_test_time = benchmark(
DecisionTreeClassifier(min_sample_split=5,
max_depth=None))
######################################################################
## Train RandomForest model
rf_err, rf_train_time, rf_test_time = benchmark(
RandomForestClassifier(n_estimators=20,
min_sample_split=5,
max_depth=None))
######################################################################
## Train Extra-Trees model
et_err, et_train_time, et_test_time = benchmark(
ExtraTreesClassifier(n_estimators=20,
min_sample_split=5,
max_depth=None))
######################################################################
## Print classification performance
print("")
print("Classification performance:")
print("===========================")
print("")
def print_row(clf_type, train_time, test_time, err):
print("%s %s %s %s" % (clf_type.ljust(12),
("%.4fs" % train_time).center(10),
("%.4fs" % test_time).center(10),
("%.4f" % err).center(10)))
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"error-rate"))
print("-" * 44)
print_row("Liblinear", liblinear_train_time, liblinear_test_time,
liblinear_err)
print_row("GaussianNB", gnb_train_time, gnb_test_time, gnb_err)
print_row("SGD", sgd_train_time, sgd_test_time, sgd_err)
print_row("CART", cart_train_time, cart_test_time, cart_err)
print_row("RandomForest", rf_train_time, rf_test_time, rf_err)
print_row("Extra-Trees", et_train_time, et_test_time, et_err)
print("")
print("")
|
bsd-3-clause
|
0todd0000/spm1d
|
spm1d/rft1d/examples/val_upx_3_T2_cluster.py
|
1
|
2122
|
from math import sqrt,log
import numpy as np
from matplotlib import pyplot
from spm1d import rft1d
eps = np.finfo(float).eps
def here_hotellingsT2(y):
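# One-sample Hotelling's T^2 at each field node: T2 = N * m W^-1 m', where m is
# the sample mean vector and W the (ddof=1) sample covariance of the responses
# at that node.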
N = y.shape[0]
m = np.matrix( y.mean(axis=0) )
T2 = []
for ii,mm in enumerate(m):
W = np.matrix( np.cov(y[:,ii,:].T, ddof=1) ) #estimated covariance
t2 = N * mm * np.linalg.inv(W) * mm.T
T2.append( float(t2) )
return np.asarray(T2)
#(0) Set parameters:
np.random.seed(0)
nResponses = 30
nComponents = 2
nNodes = 101
nIterations = 500 #this should be 1000 or larger
FWHM = 10.0
W0 = np.eye(nComponents)
interp = True
wrap = True
heights = [8, 10, 12, 14]
### derived parameters:
df = nComponents, nResponses-1 #p,m
### initialize RFT calculators:
calc = rft1d.geom.ClusterMetricCalculator()
rftcalc = rft1d.prob.RFTCalculator(STAT='T2', df=df, nodes=nNodes, FWHM=FWHM)
#(1) Generate Gaussian 1D fields, compute test stat:
T2 = []
generator = rft1d.random.GeneratorMulti1D(nResponses, nNodes, nComponents, FWHM, W0)
for i in range(nIterations):
y = generator.generate_sample()
t2 = here_hotellingsT2(y)
T2.append( t2 )
T2 = np.asarray(T2)
#(2) Maximum region size:
K0 = np.linspace(eps, 12, 21)
K = np.array([[calc.max_cluster_extent(yy, h, interp, wrap) for yy in T2] for h in heights])
P = np.array([(K>=k0).mean(axis=1) for k0 in K0]).T
P0 = np.array([[rftcalc.p.cluster(k0, h) for k0 in K0/FWHM] for h in heights])
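### K[i,j] : maximum upcrossing (cluster) extent in simulated field j at height heights[i]
### P[i,j] : simulated probability that the maximum extent is at least K0[j]
### P0[i,j]: corresponding RFT prediction (extents passed to rftcalc in units of FWHM)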
#(3) Plot results:
pyplot.close('all')
colors = ['b', 'g', 'r', 'orange']
labels = ['u = %.1f'%h for h in heights]
ax = pyplot.axes()
for color,p,p0,label in zip(colors,P,P0,labels):
ax.plot(K0, p, 'o', color=color)
ax.plot(K0, p0, '-', color=color, label=label)
ax.plot([0,1],[10,10], 'k-', label='Theoretical')
ax.plot([0,1],[10,10], 'ko-', label='Simulated')
ax.set_xlabel('x', size=16)
ax.set_ylabel('P(k_max > x)', size=16)
ax.set_ylim(0, 0.30)
ax.legend()
ax.set_title('Upcrossing extent validations ($T^2$ fields)', size=20)
pyplot.show()
|
gpl-3.0
|
gpersistence/tstop
|
scripts/scatter_anim_example.py
|
1
|
2888
|
#TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import sys
import csv
import os
import random
plt.ion()
class DynamicUpdate():
#Suppose we know the x range
min_x = 4
max_x = 6.5
def on_launch(self):
#Set up plot
self.figure, self.ax = plt.subplots(2)
self.pd_plotter, = self.ax[0].plot([],[], 'o')
self.ts_plotter, = self.ax[1].plot([2], 'r-')
#Autoscale on unknown axis and known lims on the other
#self.ax.set_autoscaley_on(True)
self.ax[0].set_xlim(self.min_x, self.max_x)
self.ax[0].set_ylim(self.min_x, self.max_x)
self.ax[1].set_autoscaley_on(True)
self.ax[1].set_autoscalex_on(True)
#Other stuff
#self.ax.grid()
#...
def on_running(self, xdata, ydata, ts_data_x, ts_data_y):
#Update data (with the new _and_ the old points)
self.pd_plotter.set_xdata(xdata)
self.pd_plotter.set_ydata(ydata)
self.ts_plotter.set_xdata(ts_data_x[len(ts_data_x)-1])
self.ts_plotter.set_ydata(ts_data_y[len(ts_data_y)-1])
#Need both of these in order to rescale
self.ax[1].relim()
self.ax[1].autoscale_view()
#We need to draw *and* flush
self.figure.canvas.draw()
self.figure.canvas.flush_events()
#Example
def __call__(self):
import numpy as np
import time
self.on_launch()
base_file = '../build/pd-'
first_pd = base_file+'10'+'.txt'
ts_filename = '../build/time_series.txt'
time_series_file = open(ts_filename,'r')
time_series_reader = csv.reader(time_series_file, delimiter=' ')
        all_ts_vals = np.array([[float(x[0]), float(x[1])] for x in time_series_reader])
for t in range(10,40000):
pd_filename = base_file+str(t)+'.txt'
pd_file = open(pd_filename,'r')
pd_reader = csv.reader(pd_file, delimiter=' ')
pd = np.array([[float(x[0]),float(x[1])] for x in pd_reader if not (float(x[0]) == float(x[1]))])
ts_vals_x = all_ts_vals[10:t+1,0]
ts_vals_y = all_ts_vals[10:t+1,1]
self.on_running(pd[:,0], pd[:,1], ts_vals_x,ts_vals_y)
#time.sleep(0.01)
        return pd[:, 0], pd[:, 1]
d = DynamicUpdate()
d()
|
gpl-3.0
|
jpautom/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
226
|
3941
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
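    # (Double-centering -0.5 * D**2 yields the Gram matrix used by classical
    # MDS / Isomap, so input and output embeddings can be compared in kernel
    # space below.)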
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
|
bsd-3-clause
|
Averroes/statsmodels
|
statsmodels/tools/testing.py
|
23
|
1443
|
"""assert functions from numpy and pandas testing
"""
import re
from distutils.version import StrictVersion
import numpy as np
import numpy.testing as npt
import pandas
import pandas.util.testing as pdt
# for pandas version check
def strip_rc(version):
return re.sub(r"rc\d+$", "", version)
def is_pandas_min_version(min_version):
'''check whether pandas is at least min_version
'''
from pandas.version import short_version as pversion
return StrictVersion(strip_rc(pversion)) >= min_version
# local copies, all unchanged
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_equal, assert_array_less,
assert_array_max_ulp, assert_raises, assert_string_equal, assert_warns)
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
if not is_pandas_min_version('0.14.1'):
        npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
else:
if isinstance(desired, pandas.Index):
pdt.assert_index_equal(actual, desired)
elif isinstance(desired, pandas.Series):
pdt.assert_series_equal(actual, desired, **kwds)
elif isinstance(desired, pandas.DataFrame):
pdt.assert_frame_equal(actual, desired, **kwds)
else:
            npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
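# Minimal usage sketch (illustrative only, not part of the statsmodels test
# helpers): assert_equal dispatches to the pandas-aware asserters when given
# pandas objects and falls back to numpy.testing otherwise.
if __name__ == '__main__':
    s = pandas.Series([1.0, 2.0, 3.0])
    assert_equal(s, s.copy())                   # pdt.assert_series_equal path
    assert_equal(np.arange(3), np.arange(3))    # npt.assert_equal fallback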
|
bsd-3-clause
|
dangeles/tissue_enrichment_tool_hypergeometric_test
|
tea_paper_docs/src/hgf_benchmark_script.py
|
4
|
14884
|
# -*- coding: utf-8 -*-
"""
A script to benchmark TEA.
@david angeles
[email protected]
"""
import tissue_enrichment_analysis as tea # the library to be used
import pandas as pd
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import re
import matplotlib as mpl
sns.set_context('paper')
# pd.set_option('display.float_format', lambda x:'%f'%x)
pd.set_option('precision', 3)
# this script generates a few directories.
dirOutput = '../output/'
dirSummaries = '../output/SummaryInformation/'
dirHGT25_any = '../output/HGT25_any_Results/'
dirHGT33_any = '../output/HGT33_any_Results/'
dirHGT50_any = '../output/HGT50_any_Results/'
dirHGT100_any = '../output/HGT100_any_Results/'
dirComp = '../output/comparisons/'
DIRS = [dirOutput, dirSummaries, dirHGT25_any, dirHGT33_any,
        dirHGT50_any, dirHGT100_any, dirComp]
# open the relevant file
path_sets = '../input/genesets_golden/'
path_dicts = '../input/WS252AnatomyDictionary/'
# Make all the necessary dirs if they don't already exist
for d in DIRS:
if not os.path.exists(d):
os.makedirs(d)
# Make the file that will hold the summaries and make the columns.
with open(dirSummaries+'ExecutiveSummary.csv', 'w') as fSum:
fSum.write('#Summary of results from all benchmarks\n')
fSum.write('NoAnnotations,Threshold,Method,EnrichmentSetUsed,TissuesTested,GenesSubmitted,TissuesReturned,GenesUsed,AvgFold,AvgQ,GenesInDict\n')
# ==============================================================================
# ==============================================================================
# # Perform the bulk of the analysis, run every single dictionary on every set
# ==============================================================================
# ==============================================================================
i = 0
# look in the dictionaries
for folder in os.walk(path_dicts):
# open each one
for f_dict in folder[2]:
if f_dict == '.DS_Store':
continue
tissue_df = pd.read_csv(path_dicts+f_dict)
        # to be dropped when the tissue dictionary is corrected
annot, thresh = re.findall(r"[-+]?\d*\.\d+|\d+", f_dict)
annot = int(annot)
thresh = float(thresh) # typecasting
method = f_dict[-7:-4]
ntiss = len(tissue_df.columns)
ngenes = tissue_df.shape[0]
# open each enrichment set
for fodder in os.walk(path_sets):
for f_set in fodder[2]:
df = pd.read_csv(path_sets + f_set)
test = df.gene.values
ntest = len(test)
short_name = f_set[16:len(f_set)-16]
df_analysis, unused = tea.enrichment_analysis(test, tissue_df,
alpha=0.05,
show=False)
# save the analysis to the relevant folder
            savepath = '../output/HGT' + str(annot) + '_' + method + '_Results/'
df_analysis.to_csv(savepath + f_set+'.csv', index=False)
tea.plot_enrichment_results(df_analysis,
                                        save=savepath + f_set + 'Graph',
ftype='pdf')
nana = len(df_analysis) # len of results
nun = len(unused) # number of genes dropped
avf = df_analysis['Enrichment Fold Change'].mean()
avq = df_analysis['Q value'].mean()
s = '{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10}'.format(
annot, thresh, method, f_set, ntiss, ntest, nana,
ntest-nun, avf, avq, ngenes)
with open(dirSummaries+'ExecutiveSummary.csv', 'a+') as fSum:
fSum.write(s)
fSum.write('\n')
# Print summary to csv
df_summary = pd.read_csv(dirSummaries+'ExecutiveSummary.csv', comment='#')
# some entries contain nulls. before I remove them, I can inspect them
df_summary.isnull().any()
indexFold = df_summary['AvgFold'].index[df_summary['AvgFold'].apply(np.isnan)]
indexQ = df_summary['AvgQ'].index[df_summary['AvgQ'].apply(np.isnan)]
df_summary.ix[indexFold[0]]
df_summary.ix[indexQ[5]]
# kill all nulls!
df_summary.dropna(inplace=True)
# calculate fraction of tissues that tested significant in each run
df_summary['fracTissues'] = df_summary['TissuesReturned']/df_summary[
'TissuesTested']
df_summary.sort_values(['NoAnnotations', 'Threshold', 'Method'], inplace=True)
# ==============================================================================
# ==============================================================================
# # Plot summary graphs
# ==============================================================================
# ==============================================================================
sel = lambda x, y, z: ((df_summary.NoAnnotations == x) &
(df_summary.Threshold == y) & (df_summary.Method == z))
# KDE of the fraction of all tissues that tested significant
# one color per cutoff
cols = ['#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e']
ls = ['-', '--', ':'] # used with varying thresh
thresh = df_summary.Threshold.unique()
NoAnnotations = df_summary.NoAnnotations.unique()
def resplot(column, method='any'):
"""
A method to quickly plot all combinations of cutoffs, thresholds.
All cutoffs are same color
All Thresholds are same line style
Parameters:
column -- the column to select
method -- the method used to specify similarity metrics
"""
for j, annots in enumerate(NoAnnotations):
for i, threshold in enumerate(thresh):
if threshold == 1:
continue
s = sel(annots, threshold, method)
            df_summary[s][column].plot('kde', color=cols[j], ls=ls[i], lw=4,
                                       label='Annotation Cut-off: {0}, '
                                             'Threshold: {1}'.format(annots,
                                                                     threshold))
resplot('fracTissues')
plt.xlabel('Fraction of all tissues that tested significant')
plt.xlim(0, 1)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'fractissuesKDE_method=any.pdf')
plt.close()
resplot('AvgQ', method='avg')
plt.xlabel('Fraction of all tissues that tested significant')
plt.xlim(0, 0.05)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'avgQKDE_method=avg.pdf')
plt.close()
resplot('AvgQ')
plt.xlabel('AvgQ value')
plt.xlim(0, .05)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'avgQKDE_method=any.pdf')
plt.close()
# KDE of the fraction of avgFold
resplot('AvgFold')
plt.xlabel('Avg Fold Change value')
plt.xlim(0, 15)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'avgFoldChangeKDE.pdf')
plt.close()
def line_prepender(filename, line):
"""Given filename, open it and prepend 'line' at beginning of the file."""
with open(filename, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(line.rstrip('\r\n') + '\n' + content)
# ==============================================================================
# ==============================================================================
# # Detailed analysis of 25 and 50 genes per node dictionaries
# ==============================================================================
# ==============================================================================
def walker(tissue_df, directory, save=True):
"""Given the tissue dictionary and a directory to save to,
open all the gene sets, analyze them and deposit the results in the
specified directory.
Parameters:
-------------------
tissue_df - pandas dataframe containing specified tissue dictionary
directory - where to save to
save - boolean indicating whether to save results or not.
"""
with open(directory+'empty.txt', 'w') as f:
f.write('Genesets with no enrichment:\n')
# go through each file in the folder
for fodder in os.walk(path_sets):
for f_set in fodder[2]:
# open df
df = pd.read_csv(path_sets + f_set)
# extract gene list and analyze
short_name = f_set
test = df.gene.values
df_analysis, unused = tea.enrichment_analysis(test, tissue_df,
show=False)
# if it's not empty and you want to save:
            if not df_analysis.empty and save:
# save without index
df_analysis.to_csv(directory+short_name+'.csv', index=False)
# add a comment
line = '#' + short_name+'\n'
line_prepender(directory+short_name+'.csv', line)
# plot
tea.plot_enrichment_results(df_analysis, title=short_name,
dirGraphs=directory, ftype='pdf')
plt.close()
# if it's empty and you want to save, place it in file called empty
if df_analysis.empty & save:
with open(directory+'empty.txt', 'a+') as f:
f.write(short_name+'\n')
def compare(resA, resB, l, r):
"""Given two results (.csv files output by tea), open and compare them,
concatenate the dataframes
Parameters:
resA, resB -- filenames that store the dfs
l, r -- suffixes to attach to the columns post merge
Returns:
result - a dataframe that is the outer merger of resA, resB
"""
# open both dfs.
df1 = pd.read_csv(resA, comment='#')
df2 = pd.read_csv(resB, comment='#')
# drop observed column from df1
df1.drop('Observed', axis=1, inplace=True)
df2.drop('Observed', axis=1, inplace=True)
# make a dummy column, key for merging
df1['key'] = df1['Tissue']
df2['key'] = df2['Tissue']
# find the index of each tissue in either df
result = pd.merge(df1, df2, on='key', suffixes=[l, r], how='outer')
# sort by q val and drop non useful columns
# result.sort_values('Q value{0}'.format(l))
result.drop('Tissue{0}'.format(l), axis=1, inplace=True)
result.drop('Tissue{0}'.format(r), axis=1, inplace=True)
result['Tissue'] = result['key']
# drop key
result.drop('key', axis=1, inplace=True)
result.sort_values(['Q value%s' % (l), 'Q value%s' % (r)], inplace=True)
# drop Expected values
result.drop(['Expected%s' % (l), 'Expected%s' % (r)], axis=1, inplace=True)
# rearrange columns
cols = ['Tissue', 'Q value%s' % (l), 'Q value%s' % (r),
'Enrichment Fold Change%s' % (l), 'Enrichment Fold Change%s' % (r)]
result = result[cols]
# drop observed
return result # return result
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff25_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT25_any)
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff50_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT50_any)
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff100_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT100_any)
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff33_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT33_any)
grouped = df_summary.groupby(['NoAnnotations', 'Threshold', 'Method'])
with open('../doc/figures/TissueNumbers.csv', 'w') as f:
f.write('Annotation Cutoff,Similarity Threshold,Method')
f.write(',No. Of Terms in Dictionary\n')
for key, group in grouped:
f.write('{0},{1},{2},{3}\n'.format(key[0], key[1], key[2],
group.TissuesTested.unique()[0]))
tissue_data = pd.read_csv('../output/SummaryInformation/TissueNumbers.csv')
sel = lambda y, z: ((tissue_data.iloc[:, 1] == y) &
(tissue_data.iloc[:, 2] == z))
# KDE of the fraction of all tissues that tested significant
cols = ['#1b9e77', '#d95f02', '#7570b3'] # used with varying colors
thresh = df_summary.Threshold.unique()
NoAnnotations = df_summary.NoAnnotations.unique()
# def resplot(column, cutoff=25, method='any'):
# """
# A method to quickly plot all combinations of cutoffs, thresholds.
# All cutoffs are same color
# All Thresholds are same line style
# """
# for i, threshold in enumerate(thresh):
# ax = plt.gca()
# ax.grid(False)
# if threshold == 1:
# continue
# tissue_data[sel(threshold, method)].plot(x='No. Of Annotations',
# y='No. Of Tissues in Dictionary',
# kind='scatter',
# color=cols[i],
# ax=ax, s=50, alpha=.7)
# ax.set_xlim(20, 110)
# ax.set_xscale('log')
# ax.set_xticks([25, 33, 50, 100])
# ax.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
#
# ax.set_ylim(25, 1000)
# ax.set_yscale('log')
# ax.set_yticks([50, 100, 250, 500])
# ax.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
#
#
# resplot('No. Of Tissues in Dictionary')
a = '../output/HGT33_any_Results/WBPaper00024970_GABAergic_neuron_specific_WBbt_0005190_247.csv'
b = '../output/HGT33_any_Results/WBPaper00037950_GABAergic-motor-neurons_larva_enriched_WBbt_0005190_132.csv'
df = compare(a, b, 'Spencer', 'Watson')
df.to_csv('../output/comparisons/neuronal_comparison_33_WBPaper00024970_with_WBPaper0037950_complete.csv',
index=False, na_rep='-', float_format='%.2g')
a = '../output/HGT33_any_Results/WBPaper00037950_GABAergic-motor-neurons_larva_enriched_WBbt_0005190_132.csv'
b = '../output/HGT50_any_Results/WBPaper00037950_GABAergic-motor-neurons_larva_enriched_WBbt_0005190_132.csv'
df = compare(a, b, '33', '50')
df.to_csv('../output/comparisons/neuronal_comparison_GABAergic_33-50_WBPaper0037950_complete.csv',
index=False, na_rep='-', float_format='%.2g')
a = '../output/HGT33_any_Results/WBPaper00024970_GABAergic_neuron_specific_WBbt_0005190_247.csv'
b = '../output/HGT50_any_Results/WBPaper00024970_GABAergic_neuron_specific_WBbt_0005190_247.csv'
df = compare(a, b, '-33', '-50')
# print to figures
df.head(10).to_csv('../doc/figures/dict-comparison-50-33.csv', index=False,
na_rep='-', float_format='%.2g')
df.to_csv('../output/comparisons/neuronal_comparison_Pan_Neuronal_33-50_WBPaper0031532_complete.csv',
index=False, na_rep='-', float_format='%.2g')
|
mit
|
ndingwall/scikit-learn
|
sklearn/ensemble/__init__.py
|
12
|
1655
|
"""
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
import typing
from ._base import BaseEnsemble
from ._forest import RandomForestClassifier
from ._forest import RandomForestRegressor
from ._forest import RandomTreesEmbedding
from ._forest import ExtraTreesClassifier
from ._forest import ExtraTreesRegressor
from ._bagging import BaggingClassifier
from ._bagging import BaggingRegressor
from ._iforest import IsolationForest
from ._weight_boosting import AdaBoostClassifier
from ._weight_boosting import AdaBoostRegressor
from ._gb import GradientBoostingClassifier
from ._gb import GradientBoostingRegressor
from ._voting import VotingClassifier
from ._voting import VotingRegressor
from ._stacking import StackingClassifier
from ._stacking import StackingRegressor
if typing.TYPE_CHECKING:
# Avoid errors in type checkers (e.g. mypy) for experimental estimators.
# TODO: remove this check once the estimator is no longer experimental.
from ._hist_gradient_boosting.gradient_boosting import ( # noqa
HistGradientBoostingRegressor, HistGradientBoostingClassifier
)
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier", "VotingRegressor",
"StackingClassifier", "StackingRegressor",
]
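# Usage note (illustrative, not part of the public module source): at runtime
# the histogram-based estimators referenced above are only importable after
# opting in to the experimental API, e.g.
#
#     from sklearn.experimental import enable_hist_gradient_boosting  # noqa
#     from sklearn.ensemble import HistGradientBoostingClassifier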
|
bsd-3-clause
|
agoose77/hivesystem
|
manual/movingpanda/panda-14d.py
|
1
|
11053
|
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import dragonfly.scene.unbound, dragonfly.scene.bound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import dragonfly.convert.pull
import dragonfly.logic
import dragonfly.bind
import Spyder
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = Spyder.AxisSystem()
a.rotateZ(360 * random())
a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "AxisSystem")
def id_generator():
n = 0
while 1:
n += 1
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d
from bee.mstr import mstr
class parameters: pass
class myscene(dragonfly.pandahive.spyderframe):
a = Spyder.AxisSystem()
a *= 0.25
a.origin += (-8, 42, 0)
env = Spyder.Model3D("models/environment", "egg", a)
#First panda
a = Spyder.AxisSystem()
a *= 0.005
pandaclass = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
actorclassname="pandaclass")
box = Spyder.Box2D(50, 470, 96, 96)
icon = Spyder.Icon("pandaicon.png", "pandaicon", box, transparency=True)
#Second panda
a = Spyder.AxisSystem()
a *= 0.002
pandaclass2 = Spyder.ActorClass3D("models/panda-model", "egg", [("walk", "models/panda-walk4", "egg")], a,
actorclassname="pandaclass2")
box = Spyder.Box2D(200, 500, 48, 48)
icon2 = Spyder.Icon("pandaicon.png", "pandaicon2", box, transparency=True)
#Third panda
a = Spyder.AxisSystem()
a *= 0.3
model = Spyder.Model3D("models/panda", "egg", a)
pandaclass3 = Spyder.EntityClass3D("pandaclass3", [model])
box = Spyder.Box2D(280, 480, 144, 112)
icon3 = Spyder.Icon("pandaicon2.png", "pandaicon3", box, transparency=True)
camcenter = Spyder.Entity3D(
"camcenter",
(
Spyder.NewMaterial("white", color=(255, 255, 255)),
Spyder.Block3D((1, 1, 1), material="white"),
)
)
marker = Spyder.Entity3D(
"marker",
(
Spyder.NewMaterial("blue", color=(0, 0, 255)),
Spyder.Circle(2, origin=(0, 0, 0.1), material="blue")
)
)
del a, box, model
from bee.spyderhive.hivemaphive import hivemapinithive
class pandawalkhive(hivemapinithive):
pandahivemap = Spyder.Hivemap.fromfile("pandawalk.web")
class pandawalkhive2(hivemapinithive):
pandahivemap = Spyder.Hivemap.fromfile("pandawalk2.web")
from jumpworker2 import jumpworker2
class jumpworkerhive(bee.frame):
height = bee.parameter("float")
duration = bee.parameter("float")
i = dragonfly.time.interval_time(time=bee.get_parameter("duration"))
startconnector = dragonfly.std.pushconnector("trigger")()
connect(startconnector, i.start)
start = bee.antenna(startconnector.inp)
jump = jumpworker2(height=bee.get_parameter("height"))
connect(i, jump)
t_jump = dragonfly.std.transistor("float")()
connect(jump, t_jump)
dojump = dragonfly.scene.bound.setZ()
connect(t_jump, dojump)
tick = dragonfly.io.ticksensor(False)
connect(tick, t_jump)
connect(startconnector, tick.start)
connect(i.reach_end, tick.stop)
class pandajumphive(bee.inithive):
ksp = dragonfly.io.keyboardsensor_trigger("SPACE")
jump = jumpworkerhive(height=4.0, duration=0.7)
connect(ksp, jump)
from bee.staticbind import staticbind_baseclass
class pandabind(dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind,
dragonfly.bind.bind):
bind_entity = "relative"
bind_keyboard = "indirect"
class camerabindhive(hivemapinithive):
camera_hivemap = Spyder.Hivemap.fromfile("camera.web")
class camerabind(staticbind_baseclass,
dragonfly.event.bind,
dragonfly.io.bind,
dragonfly.sys.bind,
dragonfly.scene.bind,
dragonfly.time.bind):
hive = camerabindhive
class myhive(dragonfly.pandahive.pandahive):
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
camerabind = camerabind().worker()
camcenter = dragonfly.std.variable("id")("camcenter")
connect(camcenter, camerabind.bindname)
startsensor = dragonfly.sys.startsensor()
cam = dragonfly.scene.get_camera()
camparent = dragonfly.scene.unbound.parent()
connect(cam, camparent.entityname)
connect(camcenter, camparent.entityparentname)
connect(startsensor, camparent)
cphide = dragonfly.scene.unbound.hide()
connect(camcenter, cphide)
connect(startsensor, cphide)
v_marker = dragonfly.std.variable("id")("marker")
hide_marker = dragonfly.scene.unbound.hide()
connect(v_marker, hide_marker)
show_marker = dragonfly.scene.unbound.show()
connect(v_marker, show_marker)
parent_marker = dragonfly.scene.unbound.parent()
connect(v_marker, parent_marker.entityname)
connect(startsensor, hide_marker)
pandaspawn = dragonfly.scene.spawn_actor_or_entity()
v_panda = dragonfly.std.variable("id")("")
connect(v_panda, pandaspawn)
panda_id_gen = dragonfly.std.generator("id", id_generator)()
panda_id = dragonfly.std.variable("id")("")
t_panda_id_gen = dragonfly.std.transistor("id")()
connect(panda_id_gen, t_panda_id_gen)
connect(t_panda_id_gen, panda_id)
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
hivereg = dragonfly.bind.hiveregister()
c_hivereg = bee.configure("hivereg")
pandabinder = pandabind().worker()
v_hivename = dragonfly.std.variable("id")("")
w_bind = dragonfly.std.weaver(("id", "id"))()
connect(panda_id, w_bind.inp1)
connect(v_hivename, w_bind.inp2)
t_bind = dragonfly.std.transistor("id")()
connect(panda_id, t_bind)
t_bind2 = dragonfly.std.transistor(("id", "id"))()
connect(w_bind, t_bind2)
connect(t_bind2, pandabinder.bind)
sel = dragonfly.logic.selector()
connect(t_bind, sel.register_and_select)
selected = dragonfly.std.variable("id")("")
connect(t_bind, selected)
t_get_selected = dragonfly.logic.filter("trigger")()
connect(sel.empty, t_get_selected)
tt_get_selected = dragonfly.std.transistor("id")()
do_select = dragonfly.std.pushconnector("trigger")()
connect(t_get_selected.false, do_select)
connect(do_select, tt_get_selected)
connect(sel.selected, tt_get_selected)
connect(tt_get_selected, selected)
disp_sel = dragonfly.io.display("id")("Selected: ")
connect(tt_get_selected, disp_sel)
connect(selected, parent_marker.entityparentname)
connect(do_select, show_marker)
connect(do_select, parent_marker)
key_tab = dragonfly.io.keyboardsensor_trigger("TAB")
connect(key_tab, sel.select_next)
connect(key_tab, t_get_selected)
key_bsp = dragonfly.io.keyboardsensor_trigger("BACKSPACE")
connect(key_bsp, sel.select_prev)
connect(key_bsp, t_get_selected)
kill = dragonfly.std.pushconnector("trigger")()
t_kill = dragonfly.std.transistor("id")()
connect(selected, t_kill)
connect(t_kill, pandabinder.stop)
remove = dragonfly.scene.unbound.remove_actor_or_entity()
connect(t_kill, remove)
disp_kill = dragonfly.io.display("id")("Killed: ")
connect(t_kill, disp_kill)
connect(kill, t_kill)
connect(kill, sel.unregister)
connect(kill, hide_marker)
connect(kill, t_get_selected)
testkill = dragonfly.logic.filter("trigger")()
connect(sel.empty, testkill)
connect(testkill.false, kill)
key_k = dragonfly.io.keyboardsensor_trigger("K")
connect(key_k, testkill)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
trig_spawn = dragonfly.std.pushconnector("trigger")()
connect(trig_spawn, t_panda_id_gen)
connect(trig_spawn, do_spawn)
connect(trig_spawn, t_bind)
connect(trig_spawn, t_bind2)
connect(trig_spawn, do_select)
#First panda
v_panda1 = dragonfly.std.variable("id")("pandaclass")
set_panda1 = dragonfly.std.transistor("id")()
connect(v_panda1, set_panda1)
connect(set_panda1, v_panda)
c_hivereg.register_hive("pandawalk", pandawalkhive)
v_hivename1 = dragonfly.std.variable("id")("pandawalk")
set_hivename1 = dragonfly.std.transistor("id")()
connect(v_hivename1, set_hivename1)
connect(set_hivename1, v_hivename)
pandaicon_click = dragonfly.io.mouseareasensor("pandaicon")
connect(pandaicon_click, set_panda1)
connect(pandaicon_click, set_hivename1)
connect(pandaicon_click, trig_spawn)
#Second panda
v_panda2 = dragonfly.std.variable("id")("pandaclass2")
set_panda2 = dragonfly.std.transistor("id")()
connect(v_panda2, set_panda2)
connect(set_panda2, v_panda)
c_hivereg.register_hive("pandawalk2", pandawalkhive2)
v_hivename2 = dragonfly.std.variable("id")("pandawalk2")
set_hivename2 = dragonfly.std.transistor("id")()
connect(v_hivename2, set_hivename2)
connect(set_hivename2, v_hivename)
pandaicon2_click = dragonfly.io.mouseareasensor("pandaicon2")
connect(pandaicon2_click, set_panda2)
connect(pandaicon2_click, set_hivename2)
connect(pandaicon2_click, trig_spawn)
#Third panda
v_panda3 = dragonfly.std.variable("id")("pandaclass3")
set_panda3 = dragonfly.std.transistor("id")()
connect(v_panda3, set_panda3)
connect(set_panda3, v_panda)
c_hivereg.register_hive("pandajump", pandajumphive)
v_hivename3 = dragonfly.std.variable("id")("pandajump")
set_hivename3 = dragonfly.std.transistor("id")()
connect(v_hivename3, set_hivename3)
connect(set_hivename3, v_hivename)
pandaicon3_click = dragonfly.io.mouseareasensor("pandaicon3")
connect(pandaicon3_click, set_panda3)
connect(pandaicon3_click, set_hivename3)
connect(pandaicon3_click, trig_spawn)
myscene = myscene(
scene="scene",
canvas=canvas,
mousearea=mousearea,
)
wininit = bee.init("window")
wininit.camera.setPos(0, 45, 25)
wininit.camera.setHpr(180, -20, 0)
keyboardevents = dragonfly.event.sensor_match_leader("keyboard")
add_head = dragonfly.event.add_head()
head = dragonfly.convert.pull.duck("id", "event")()
connect(selected, head)
connect(keyboardevents, add_head)
connect(head, add_head)
connect(add_head, pandabinder.event)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
main.run()
|
bsd-2-clause
|
wanggang3333/scikit-learn
|
sklearn/neighbors/nearest_centroid.py
|
199
|
7249
|
# -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, which makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
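# Minimal usage sketch (illustrative only, not part of the library source):
# shrink_threshold soft-thresholds each class centroid toward the overall data
# centroid, which can zero out uninformative features entirely.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(40, 5)
    y = np.repeat([0, 1], 20)
    X[y == 1, 0] += 3.0                     # only feature 0 separates the classes
    clf = NearestCentroid(shrink_threshold=0.5).fit(X, y)
    print(clf.centroids_)                   # shrunken class centroids
    print(clf.predict(X[:3]))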
|
bsd-3-clause
|