repo_name (stringlengths 7-79) | path (stringlengths 4-179) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 959-798k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
ibackus/custom_python_packages
|
clumps/clumpfinding.py
|
2
|
29475
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 5 12:10:08 2014
@author: ibackus
"""
# Multiprocessing modules
from multiprocessing import Pool, cpu_count
# Generic packages
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
import subprocess
import glob
import os
import matplotlib.pyplot as plt
import re
# 'Internal' packages
import isaac
def clump_tracker(fprefix, param=None, directory=None, nsmooth=32, verbose=True):
"""
Finds and tracks clumps over a simulation with multiple time steps and
calculates various physical properties of the clumps.
Runs all the steps necessary to find/track clumps. These are:
get_fnames
pFind_clumps
pClump_properties
pLink2
multilink
build_clumps
If the iord property is not found, the linking will only work if the number
of particles remains constant through the simulation
**ARGUMENTS**
fprefix : str
Prefix of the simulation outputs
param : str (recommended)
Filename of a .param file for the simulation
directory : str (optional)
Directory to search through. Default is current working directory
nsmooth : int (optional)
Number of nearest neighbors used for particle smoothing in the
simulation. This is used in the definition of a density threshold
for clump finding.
verbose : bool (optional)
Verbosity flag. Default is True
**RETURNS**
clump_list : list
A list containing dictionaries for all clumps found in the simulation
See clump_properties for a list of the properties calculated for clumps
"""
# Get a list of all snapshot files
fnames = get_fnames(fprefix, directory)
nfiles = len(fnames)
# Run the clump (halo) finder
if verbose: print "\n\nRunning clump finder on {} files\n\n".format(nfiles)
clumpnum_list = pFind_clumps(fnames, nsmooth, param, verbose=verbose)
nclumps = np.zeros(nfiles, dtype=int)
for i, clumpnums in enumerate(clumpnum_list):
nclumps[i] = clumpnums.max()
if nclumps.max() <= 0:
if verbose: print 'No clumps found'
return []
# Calculate the physical properties of the clumps
if verbose: print "\n\nCalculating the physical properties of clumps\n\n"
properties = pClump_properties(fnames, clumpnum_list)
# Link clumps on consecutive time-steps
if verbose: print "\n\nLinking Clumps\n\n"
link_list = pLink2(properties)
# Link on multiple time-steps
multilink_list = multilink(link_list)
# Build the clumps
clump_list = build_clumps(multilink_list, properties, fnames, param)
return clump_list
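# Illustrative usage sketch for clump_tracker. The output prefix and .param
# filename below are hypothetical; in practice they should match the ChaNGa
# simulation being analyzed:
#
#   clumps = clump_tracker('snapshot', param='snapshot.param', directory='run01')
#   for c in clumps:
#       print c['m'][-1], c['N'][-1]    # clump mass and particle count at the last step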
def get_fnames(fprefix, directory=None):
"""
Finds the filenames of ChaNGa simulation outputs. They are formatted as:
fprefix.000000
i.e., fprefix followed by '.' and a 6 digit number
**ARGUMENTS**
fprefix : str
Prefix of the simulation outputs
directory : str (optional)
Directory to search through. Default is current working directory
**RETURNS**
fnames : list
A list containing the matching filenames
"""
fnames = []
if directory is not None:
fprefix = os.path.join(directory, fprefix)
repattern = '^' + fprefix + '\.(?:(?<!\d)\d{6}(?!\d))$'
for fname in glob.glob(fprefix + '.*'):
if re.search(repattern, fname) is not None:
fnames.append(fname)
fnames.sort()
return fnames
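# Example of the naming convention get_fnames expects (hypothetical prefix 'disk'):
# 'disk.000000' and 'disk.001200' match, while 'disk.000100.iord' and 'disk.12345'
# do not, because exactly six digits must follow the '.' at the end of the name.
#
#   fnames = get_fnames('disk', directory='run01')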
def blank_clump(clump_pars, nt=1):
"""
Generates a blank clump dictionary, using clump_pars
"""
ignore_keys = ['iord', 'ids']
keys = clump_pars.keys()
# Remove the ignored keys
for ignore_key in ignore_keys:
if ignore_key in keys:
keys.remove(ignore_key)
clump = {}
for key in keys:
# Load the current value
val = clump_pars[key]
# All values should be nd arrays or nd simarrays
shape = list(val.shape)
shape[0] = nt
# Initialize a blank array with the same dtype
val_array = np.ones(shape) * np.nan
# Check if there are units
if pynbody.units.has_units(val):
val_array = SimArray(val_array, val.units)
clump[key] = val_array
return clump
def build_clumps(multilink_list, clump_pars_list, fnames=None, param=None):
"""
Builds a list of clump dictionaries. The clump dictionaries contain various
properties as a function of time for the different clumps.
**ARGUMENTS**
multilink_list : list
A list of clump-link arrays (output of multilink)
clump_pars_list : list
List of dictionaries containing clump properties for all clumps on a
given time step (see pClump_properties)
fnames : list (recommended)
A list of the simulation snapshot filenames. If provided, the time
steps are included in the clumps
param : str (recommended)
Filename of a .param file for the simulation. Only used if fnames is
provided
**RETURNS**
clump_list : list
A list of clump dictionaries. Each clump dictionary gives various
physical properties for a clump as a function of time. A value of NaN
means the clump was not present at that time step.
"""
# Initialize list to contain all the clump objects (dictionaries)
clump_list = []
nt = len(clump_pars_list) # number of timesteps
nclumps = len(multilink_list) # number of clumps
if nclumps < 1:
return clump_list
# Find the time step of each simulation
if fnames is not None:
t = SimArray(np.zeros(nt), 'yr')
for i, fname in enumerate(fnames):
f = pynbody.load(fname, paramname=param)
t_unit = SimArray(1, f.infer_original_units('yr'))
# Note, for ChaNGa outputs, t0 = t_unit (ie, 1 in simulation units)
# To correct for this, we subtract off one time unit from the
# snapshot's time
t[i] = f.properties['time'] - t_unit
# Start by finding the first non-zero clump_pars (ie, first timestep with
# clumps)
for i, clump_pars in enumerate(clump_pars_list):
if clump_pars is not None:
iFirstClump = i
break
# Now fill in the clumps
for iClump in range(nclumps):
print iClump
# Initialize a blank clump
clump = blank_clump(clump_pars_list[iFirstClump], nt)
for iord, iStep in multilink_list[iClump]:
clump_pars = clump_pars_list[iStep]
for key in clump.keys():
clump[key][iStep] = clump_pars[key][iord]
clump['time'] = t.copy()
clump_list.append(clump)
return clump_list
def multilink(link_list):
"""
Links clumps on multiple time steps.
Given the output of link2 or pLink2 for multiple time-steps, this determines
every clump's ID as a function of time-step.
**ARGUMENTS**
link_list : list
A list of link arrays. link_list[i] contains the links between time
step i and i+1.
ie: link_list[i] = link2(clump_pars_list[i], clump_pars_list[i+1])
Same as the output from pLink2
**RETURNS**
clumpid_list : list
A list of 2D arrays. Each array gives pairs of (clumpID, time-step)
"""
n_links = len(link_list)
clump_list = []
iStart = 0
while iStart < n_links:
if link_list[iStart] is not None:
pairs0 = link_list[iStart]
new_mask = pairs0[:,0] == -1
new_iord = pairs0[new_mask,1]
for iord0 in new_iord:
t = iStart + 1
iPair = iStart + 1
# Initialize a new clump
clump = [ [iord0, t] ]
while iPair < n_links:
pairs1 = link_list[iPair]
if pairs1 is None:
# There are no clumps at this time step
break
t = iPair + 1
iord = clump[-1][0]
new_ind = np.nonzero(pairs1[:,0] == iord)[0]
if len(new_ind) > 0:
# The clump links to something in the next timestep
new_ind = int(new_ind)
# Add the new index to the clump
#clump.append([pairs1[new_ind, 1], t])
clump.append([new_ind, t])
# Increment the time step
iPair += 1
else:
# The clump links to nothing. It has died
break
clump_list.append(np.array(clump))
iStart += 1
return clump_list
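# Worked example (illustrative): suppose there are three time steps and
#   link_list[0] = np.array([[-1, 0], [-1, 1]])  # two new clumps appear at step 1
#   link_list[1] = np.array([[ 0, 0], [ 1, 1]])  # both persist into step 2
# Then multilink(link_list) returns two arrays of (clump ID, time-step) pairs:
#   [array([[0, 1], [0, 2]]), array([[1, 1], [1, 2]])]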
def _parallel_link2(args):
"""
A parallel wrapper for link2
"""
return link2(*args)
def pLink2(clump_pars_list):
"""
A parallel (batch) implementation of link2 for linking clumps in
consecutive time-steps.
**ARGUMENTS**
clump_pars_list : list
A list of dictionaries containing the properties of clumps. Each
element of the list is a dictionary for a single time step.
(see pClump_properties)
**RETURNS**
link_list : list
A list of link arrays. link_list[i] contains the links between time
step i and i+1.
ie: link_list[i] = link2(clump_pars_list[i], clump_pars_list[i+1])
"""
arg_list = zip(clump_pars_list[0:-1], clump_pars_list[1:])
nproc = cpu_count()
pool = Pool(nproc)
link_list = pool.map(_parallel_link2, arg_list)
pool.close()
pool.join()
return link_list
def link2(clump_pars1, clump_pars2, link_thresh = 0.2):
"""
'Links' the clumps in two consecutive timesteps. i.e., a clump numbered
10 in one time step might be numbered 15 in the next. Requires the particle
iord (ie, particle ID)
**ARGUMENTS**
clump_pars1 : dict
Dict containing the properties of the clumps in time-step 1
(see clump_properties)
clump_pars2 : dict
Dict containing the properties of the clumps in time-step 2
(see clump_properties)
link_thresh : float (optional)
The minimum fraction of particles in clump i at time 1 which must end
up in clump j at time 2 to consider the clumps 'linked'
**RETURNS**
clump_pairs : array
2D numpy array organized according to (parent index, clump number)
where clump number is the number of a clump in time 2 and parent index
is the number of its "parent" in time-step 1 (ie, that same clump's
ID in time-step 1)
A parent index of -1 corresponds to no parent (a new clump)
"""
if clump_pars2 is None:
# there are no clumps in the second time step. Any clumps in the first
# time step must have died. Return None
return
if clump_pars1 is None:
# Any clumps in the second time step are new. This is handled by
# saying their parents are -1
n2 = len(clump_pars2['iord'])
clump_pairs = np.zeros([n2, 2], dtype=int)
clump_pairs[:,0] = -1
clump_pairs[:,1] = np.arange(n2)
return clump_pairs
# number of clumps in each time step
n1 = len(clump_pars1['iord'])
n2 = len(clump_pars2['iord'])
iord_list1 = list(clump_pars1['iord'])
iord_list2 = list(clump_pars2['iord'])
# Used to store how many particles clumps have in common
connections = np.zeros([n1, n2], dtype=int)
# Calculate the number of connections common to the clumps in clump_pars1
# and the clumps in clump_pars2
# Loop over the first set of clumps
for i, iord1 in enumerate(iord_list1):
# Loop over the second set of clumps
iord1.sort()
npart1 = len(iord1)
for j, iord2 in enumerate(iord_list2):
# Find which particles are common to clump[i] in pars1 and clump[j]
# in pars2
intersect = np.intersect1d(iord1, iord2, assume_unique=True)
# Save how many are shared in the 2 clumps
connections[i,j] = len(intersect)
# Now only retain particles that are not common to the clumps
# IE, we know where these particles end up, we can stop checking
# them
iord1 = np.setdiff1d(iord1, intersect, assume_unique=True)
iord2 = np.setdiff1d(iord2, intersect, assume_unique=True)
iord_list2[j] = iord2
if len(iord1) < 1:
# There are no more particles to look at in the original clump
break
# Now ignore any connections where the number of particles shared between
# clumps is less than link_thresh * (num. part. in clump 1)
thresh_mask = connections[i,:] < link_thresh * npart1
connections[i, thresh_mask] = 0
# Find the clump in clump_pars2 that shares the greatest number of members with
# a clump in clump_pars1. This gives us the children of the clumps in
# clump_pars1
col_ind = connections.argmax(1)
# Set all others to 0
mask = np.zeros([n1,n2], dtype=bool)
row_ind = np.arange(n1)
mask[row_ind, col_ind] = True
connections[~mask] = 0
# The clumps in clump_pars2 may have multiple parents. Select the parent
# which shares the most particles in common (note, if this isn't the
# child of any clump, parent_index will always be 0)
parent_index = connections.argmax(0)
# and find the number of particles inherited from the parent
n_inherit = connections.max(0)
# Demand to inherit at least 1 particle from the parent
parent_index[n_inherit < 1] = -1
# Now create the clump pairs
clump_pairs = np.array( [parent_index, np.arange(n2)] ).T
return clump_pairs
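# Worked example (illustrative): two clumps at time 1 with particle iords
# {0,1,2,3} and {10,11,12,13}, and two clumps at time 2 with {0,1,2,99} and
# {10,11,12,13,14}. Each time-1 clump sends well over link_thresh of its
# particles into one time-2 clump, so link2 returns
#   array([[0, 0],
#          [1, 1]])
# i.e. clump 0 at time 2 descends from clump 0 at time 1, and clump 1 from clump 1.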
def clump_im(f, clump_array, width, qty='rho', resolution=1200, clim=None, clump_min=None):
"""
Plots an sph image from f with particles not in clumps colored red and
particles in clumps colored green. Uses pynbody for a backend.
**ARGUMENTS**
f : TipsySnapshot (see pynbody) or str
The snapshot to plot OR the filename of a snapshot to plot
clump_array : numpy array
An array (same length as f) such that 0 entries correspond to particles
not in clumps and entries > 0 correspond to particles in clumps
width : str, simarray
See pynbody.plot.sph.image. Width of the plot, ie '3 au'
resolution : int
Resolution in pixels of the plot. The plot will be res by res pixels
clim : tuple,list,array
Density limits arranged as [low, high]. Any pixels below low are mapped
to 0, any pixels above high are mapped to 1.
clump_min : float
Used to set a cutoff for masking the clumps. Not needed
**RETURNS**
image : numpy nd-array
Returns an NxNx3 numpy array of the color image plotted.
"""
# Check if the input is a filename
if isinstance(f, str):
f = pynbody.load(f)
# Get the current state for matplotlib (this still needs work, since an
# extra window will in general be created)
current_fig = plt.gcf()
interactive_flag = plt.isinteractive()
plt.ioff()
# Intermediate figure, required for rendering the plots
fig1 = plt.figure()
# Render a grayscale image of all the particles
im_all = pynbody.plot.sph.image(f.g, width=width,resolution=resolution, cmap='gray', qty=qty)
xlim = plt.xlim()
ylim = plt.ylim()
extent = [xlim[0], xlim[1], ylim[0], ylim[1]]
fig1.clf()
# Initialize the color image
im_color = np.zeros([resolution, resolution, 3])
# Set the red channel of the color image to be the plot of all particles
im_color[:,:,0] = np.log(im_all)
# Check to see that there is at least one clump
clump_flag = (clump_array.max() > 0)
if clump_flag:
# Get a sub-snap of just particles in a clump
mask = clump_array > 0
f2 = f[mask]
# Render an image of just particles in a clump
im_clump = pynbody.plot.sph.image(f2.g, width=width,resolution=resolution, cmap='gray',qty=qty)
# Set the clump image as the green channel
im_color[:,:,1] = np.log(im_clump)
plt.clf()
# Most of the clump image should be blank space: ignore everything
# below a threshold
if clump_min is None:
clump_min = im_clump.mean()
mask2 = im_clump > clump_min
# Set the color scaling
if clim is None:
clim = [im_all.min(), im_all.max()]
log_clim = [np.log(clim[0]), np.log(clim[1])]
im_color -= log_clim[0]
im_color /= (log_clim[1] - log_clim[0])
im_color[im_color < 0] = 0
im_color[im_color > 1] = 1
if clump_flag:
# Set all pixels outside a clump (in the clump image) to 0
im_color[~mask2,1] = 0
# Set all pixels inside a clump (in the overall image) to 0
im_color[mask2,0] = 0
else:
im_color[:,:,1] = 0
im_color[:,:,2] = 0
# Plot
plt.figure(current_fig.number)
if interactive_flag:
plt.ion()
plt.imshow(im_color, extent=extent, interpolation='none', aspect='equal')
# Echo the color limits used
print 'clims used: {}'.format(clim)
plt.close(fig1)
return im_color
def _parallel_clump_pars(args):
"""
A wrapper to parallelize clump_properties
"""
return clump_properties(*args)
def pClump_properties(flist, clumpnum_list):
"""
A parallel (batch) implementation of clump_properties. Calculates the
physical properties of clumps in a list of snapshots.
**ARGUMENTS**
flist : list
A list of tipsy snapshots or filenames pointing to snapshots.
clumpnum_list : list
A list of arrays (one per snapshot) which define the clump number
each particle belongs to (see pFind_clumps)
**RETURNS**
properties : list
A list of dictionaries which contain the clump properties for every
snapshot (see clump_properties)
"""
nproc = cpu_count()
arg_list = zip(flist, clumpnum_list)
pool = Pool(nproc)
properties = pool.map(_parallel_clump_pars, arg_list)
pool.close()
pool.join()
return properties
def clump_properties(f, clump_nums):
"""
Calculates the physical properties of clumps in a snapshot.
**ARGUMENTS**
f : str -OR- tipsy snapshot
Either a tipsy snapshot or a filename pointing to a snapshot
clump_nums : array like
Clump number that each particle belongs to (see clumps.find_clumps).
0 corresponds to not being in a clump.
**RETURNS**
properties : dict
A dictionary containing the physical properties of the clumps.
Keys are:
'm' mass
'N' Number of particles
'pos' xyz position
'r' cylindrical radial position
'v' center of mass velocity
'L' Angular momentum relative to clump center of mass
'T' Average temperature
'rho' Average density
'r_clump' Clump radius. Sqrt of mass averaged particle distance squared
(from the center of mass). IE: r = sqrt( sum(mr^2)/sum(m))
'ids' particle IDs in the clump (first particle in simulation is
0, second is 1, etc...)
'iord' Particle iord (a particle's ID for the whole simulation)
"""
if clump_nums.max() < 1:
# Return none if there are no clumps
return
if isinstance(f, str):
f = pynbody.load(f)
try:
iorder = f['iord']
except KeyError:
print 'Warning. iorder not found. Assuming 0,1,2,3...'
iorder = np.arange(len(f))
particle_nums = np.arange(len(f))
# Only include particles in a clump AND that are not star particles
mask1 = clump_nums > 0
n_star = len(f.s)
mask1[-(n_star+1):-1] = False
clump_nums1 = clump_nums[mask1]
f1 = f[mask1]
iorder1 = iorder[mask1]
particle_nums1 = particle_nums[mask1]
# Get units set up
m_unit = f1['mass'].units
l_unit = f1['pos'].units
v_unit = f1['vel'].units
rho_unit = f1['rho'].units
# Get arrays of pointers to the required quantities
f_mass = f1['mass']
f_pos = f1['pos']
f_v = f1['vel']
f_T = f1['temp']
f_rho = f1['rho']
# Initialize arrays
n_clumps = clump_nums1.max()
m = SimArray(np.zeros(n_clumps), m_unit) # clump mass
N = np.zeros(n_clumps, dtype=int) # Number of particles/clump
pos = SimArray(np.zeros([n_clumps,3]), l_unit) # center of mass
r = SimArray(np.zeros(n_clumps), l_unit) # center of mass radial position
v = SimArray(np.zeros([n_clumps, 3]), v_unit) # center of mass velocity
# Angular momentum around the center of mass rest frame
L = SimArray(np.zeros([n_clumps, 3]), m_unit*l_unit*v_unit)
T = SimArray(np.zeros(n_clumps), 'K') # mass averaged temperature
rho = SimArray(np.zeros(n_clumps), rho_unit) # density
r_clump = SimArray(np.zeros(n_clumps), l_unit) # clump radius (size)
# index of each particle (in this file)
particle_ids = []
# universal identity of each particle
particle_iord = []
# loop over the clumps
for i in range(n_clumps):
mask2 = (clump_nums1 == i+1)
# Mask the input arrays to look at only the current clump
p_mass = f_mass[mask2]
p_pos = f_pos[mask2]
p_v = f_v[mask2]
p_T = f_T[mask2]
p_rho = f_rho[mask2]
# Calculate properties of the clump
N[i] = mask2.sum()
m[i] = p_mass.sum()
pos[i] = np.dot(p_pos.T, p_mass[:,None]).flatten()
pos[i] /= float(m[i])
r[i] = np.sqrt((pos[i]**2).sum())
v[i] = np.dot(p_v.T, p_mass[:,None]).flatten()
v[i] /= float(m[i])
# position of all particles relative to center of mass
cm_pos = p_pos - pos[i]
# velocity of all particles relative to center of mass
cm_v = p_v - v[i]
# angular momentum of all particles relative to center of mass
cm_momentum = (cm_v * p_mass[:,None])
p_L = np.cross(cm_pos, cm_momentum)
# Total angular momentum relative to center of mass
L[i] = p_L.sum(0)
T[i] = p_T.sum()/N[i]
rho[i] = p_rho.sum()/N[i]
# Clump radius
try:
r_clump[i] = np.sqrt((p_mass*( (cm_pos**2).sum(1) )).sum()/m[[i]])
except pynbody.units.UnitsException:
print 'i is: {}'.format(i)
return p_mass, cm_pos, m
particle_ids.append(particle_nums1[mask2])
particle_iord.append(iorder1[mask2])
properties = {'m':m, 'N':N, 'pos':pos, 'r':r, 'v':v, 'L':L, 'T':T, 'rho':rho,\
'r_clump': r_clump, 'ids': particle_ids, 'iord': particle_iord}
return properties
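# Illustrative sketch of reading the returned dictionary (the snapshot name is
# hypothetical; clump_nums would come from find_clumps):
#
#   props = clump_properties('snapshot.000500', clump_nums)
#   if props is not None:
#       print props['m'].in_units('Msol')   # clump masses
#       print props['r_clump']              # sqrt( sum(m*r^2)/sum(m) ) per clump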
def _parallel_find_clumps(args):
"""
A wrapper to parallelize find_clumps()
"""
return find_clumps(*args)
def pFind_clumps(f_list, n_smooth=32, param=None, arg_string=None, verbose=True):
"""
A parallel implementation of find_clumps. Since SKID is not parallelized
this can be used to run find_clumps on a set of snapshots from one
simulation.
**ARGUMENTS**
f_list : list
A list containing the filenames of snapshots OR the tipsy snapshots
n_smooth : int (optional)
Number of nearest neighbors used for particle smoothing in the
simulation. This is used in the definition of a density threshold
for clump finding.
param : str (optional)
filename for a tipsy .param file
arg_string : str (optional)
Additional arguments to be passed to SKID. Cannot use -tau, -d, -m, -s, -o
verbose : bool
Verbosity flag. Default is True
**RETURNS**
clumpnum_list : list
A list containing the particle clump assignment for every snapshot in
f_list. clumps[i][j] gives the clump number for particle j in
snapshot i.
"""
# Number of processes to create = number of cores
n_proc = cpu_count()
# Set up the arguments for calls to find_clumps
arg_list = []
for i, f_name in enumerate(f_list):
arg_list.append([f_name, n_smooth, param, arg_string, i, verbose])
print arg_list
# Set up the pool
pool = Pool(n_proc)
# Run the job in parallel
results = pool.map(_parallel_find_clumps, arg_list, chunksize=1)
pool.close()
pool.join()
return results
def find_clumps(f, n_smooth=32, param=None, arg_string=None, seed=None, verbose=True):
"""
Uses skid (https://github.com/N-BodyShop/skid) to find clumps in a gaseous
protoplanetary disk.
The linking length used is equal to the gravitational softening length of
the gas particles.
The density cut-off comes from the criterion that there are n_smooth particles
within the Hill sphere of a particle. This is formulated mathematically as:
rho_min = 3*n_smooth*Mstar/R^3
where R is the distance from the star. The trick used here is to multiply
all particle masses by R^3 before running skid so the density cut-off is:
rho_min = 3*n_smooth*Mstar
**ARGUMENTS**
*f* : TipsySnap, or str
A tipsy snapshot loaded/created by pynbody -OR- a filename pointing to a
snapshot.
*n_smooth* : int (optional)
Number of particles used in SPH calculations. Should be the same as used
in the simulation. Default = 32
*param* : str (optional)
filename for a .param file for the simulation
*arg_string* : str (optional)
Additional arguments to be passed to skid. Cannot use -tau, -d, -m, -s, -o
*seed* : int
An integer used to seed the random filename generation for temporary
files. Necessary for multiprocessing and should be unique for each
thread.
*verbose* : bool
Verbosity flag. Default is True
**RETURNS**
*clumps* : array, int-like
Array containing the group number each particle belongs to, with star
particles coming after gas particles. A zero means the particle belongs
to no groups
"""
# Parse arguments
if isinstance(f, str):
f = pynbody.load(f, paramfile=param)
if seed is not None:
np.random.seed(seed)
# Estimate the linking length as the gravitational softening length
tau = f.g['eps'][0]
# Calculate minimum density
rho_min = 3*n_smooth*f.s['mass'][0]
# Center on star. This is done because R used in hill-sphere calculations
# is relative to the star
star_pos = f.s['pos'].copy()
f['pos'] -= star_pos
# Scale mass by R^3
R = isaac.strip_units(f['rxy'])
m0 = f['mass'].copy()
f['mass'] *= (R+tau)**3
# Save temporary snapshot
f_prefix = str(np.random.randint(np.iinfo(int).max))
f_name = f_prefix + '.std'
# Save temporary .param file
if param is not None:
param_name = f_prefix + '.param'
param_dict = isaac.configparser(param, 'param')
isaac.configsave(param_dict, param_name)
f.write(filename=f_name, fmt=pynbody.tipsy.TipsySnap)
f['pos'] += star_pos
f['mass'] = m0
command = 'totipnat < {} | skid -tau {:.2e} -d {:.2e} -m {:d} -s {:d} -o {}'\
.format(f_name, tau, rho_min, n_smooth, n_smooth, f_prefix)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if verbose:
for line in iter(p.stdout.readline, ''):
print line,
p.wait()
# Load clumps
clumps = isaac.loadhalos(f_prefix + '.grp')
# Cleanup
for name in glob.glob(f_prefix + '*'):
os.remove(name)
return clumps
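# Sketch of the density-threshold trick described in the find_clumps docstring:
# SKID applies a single density cut, while the Hill-sphere criterion
# rho_min = 3*n_smooth*Mstar/R^3 depends on the distance R from the star.
# Scaling each particle mass (and hence its density estimate) by R^3 before
# running SKID makes the cut independent of R:
#   rho * R**3 >= 3*n_smooth*Mstar   <=>   rho >= 3*n_smooth*Mstar / R**3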
|
mit
|
djgagne/scikit-learn
|
examples/plot_kernel_approximation.py
|
262
|
8004
|
"""
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) will not necessarily be classified
into the region in which it appears to lie, since it does not lie on the plane
spanned by the first two principal components.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
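# A minimal sketch of the speed-up mentioned in the docstring: the LinearSVC
# inside the approximate-kernel pipelines can be swapped for SGDClassifier.
# This variant is illustrative and is not timed above.
#
#   from sklearn.linear_model import SGDClassifier
#   sgd_fourier_svm = pipeline.Pipeline(
#       [("feature_map", RBFSampler(gamma=.2, random_state=1, n_components=100)),
#        ("svm", SGDClassifier())])
#   sgd_fourier_svm.fit(data_train, targets_train)
#   print(sgd_fourier_svm.score(data_test, targets_test))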
|
bsd-3-clause
|
stefanseibert/DataMining
|
experiment01/resources/b103_stockMarketClusteringFragment.py
|
1
|
3218
|
"""
Created on 26.02.2012
@author: maucher
This version uses the cleaned data provided by matplotlib.finance.
In the cleaned data, the "open" value is also adjusted w.r.t. splits and dividends.
"""
print __doc__
import datetime
from matplotlib import finance
import numpy as np
from matplotlib import pyplot as plt
from sklearn import cluster
from sklearn import metrics
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 01, 01)
d2 = datetime.datetime(2008, 01, 01)
symbol_dict = {
'TOT' : 'Total',
'XOM' : 'Exxon',
'CVX' : 'Chevron',
'COP' : 'ConocoPhillips',
'VLO' : 'Valero Energy',
'MSFT' : 'Microsoft',
'IBM' : 'IBM',
'TWX' : 'Time Warner',
'CMCSA': 'Comcast',
'CVC' : 'Cablevision',
'YHOO' : 'Yahoo',
'DELL' : 'Dell',
'HPQ' : 'Hewlett-Packard',
'AMZN' : 'Amazon',
'TM' : 'Toyota',
'CAJ' : 'Canon',
'MTU' : 'Mitsubishi',
'SNE' : 'Sony',
'F' : 'Ford',
'HMC' : 'Honda',
'NAV' : 'Navistar',
'NOC' : 'Northrop Grumman',
'BA' : 'Boeing',
'KO' : 'Coca Cola',
'MMM' : '3M',
'MCD' : 'Mc Donalds',
'PEP' : 'Pepsi',
'KFT' : 'Kraft Foods',
'K' : 'Kellogg',
'UN' : 'Unilever',
'MAR' : 'Marriott',
'PG' : 'Procter Gamble',
'CL' : 'Colgate-Palmolive',
'NWS' : 'News Corporation',
'GE' : 'General Electrics',
'WFC' : 'Wells Fargo',
'JPM' : 'JPMorgan Chase',
'AIG' : 'AIG',
'AXP' : 'American express',
'BAC' : 'Bank of America',
'GS' : 'Goldman Sachs',
'AAPL' : 'Apple',
'SAP' : 'SAP',
'CSCO' : 'Cisco',
'TXN' : 'Texas instruments',
'XRX' : 'Xerox',
'LMT' : 'Lockheed Martin',
'WMT' : 'Wal-Mart',
'WAG' : 'Walgreen',
'HD' : 'Home Depot',
'GSK' : 'GlaxoSmithKline',
'PFE' : 'Pfizer',
'SNY' : 'Sanofi-Aventis',
'NVS' : 'Novartis',
'KMB' : 'Kimberly-Clark',
'R' : 'Ryder',
'GD' : 'General Dynamics',
'RTN' : 'Raytheon',
'CVS' : 'CVS',
'CAT' : 'Caterpillar',
'DD' : 'DuPont de Nemours',
}
symbols, names = np.array(symbol_dict.items()).T
print "----------------------------Symbols---------------------------------------"
print symbols
print "----------------------------Names---------------------------------------"
print names
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
print "----------------------------Quotes---------------------------------------"
print "Number of quotes: ",len(quotes)
print "--------------------------open and close-----------------------------------"
#volumes = np.array([q.volume for q in quotes]).astype(np.float)
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
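# A possible continuation (sketch only; the fragment above ends here): the
# imported sklearn.cluster module is typically used to group stocks by the
# correlation of their daily variations, e.g. with affinity propagation.
#
#   variation = close - open
#   correlations = np.corrcoef(variation)
#   _, labels = cluster.affinity_propagation(correlations)
#   for i in range(labels.max() + 1):
#       print 'Cluster %i: %s' % (i, ', '.join(names[labels == i]))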
|
mit
|
hitszxp/scikit-learn
|
sklearn/tests/test_multiclass.py
|
4
|
22382
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
"""Test that ovr works with classes that are always present or absent."""
# Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
ovr.fit(iris.data, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# test that ties are broken using the decision function, not defaulting to
# the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
# recalculate votes to make sure we have a tie
predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_])
scores = np.vstack([clf.decision_function(X)
for clf in multi_clf.estimators_])
# classifiers are in order 0-1, 0-2, 1-2
# aggregate votes:
votes = np.zeros((4, 3))
votes[np.arange(4), predictions[0]] += 1
votes[np.arange(4), 2 * predictions[1]] += 1
votes[np.arange(4), 1 + predictions[2]] += 1
# for the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# for the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# for the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], 0)
# in the zero-one classifier, the score for 0 is greater than the score for
# one.
assert_greater(scores[0][0], scores[0][1])
# score for one is greater than score for zero
assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0])
# score for one is greater than score for two
assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0])
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
"Test that the OvO doesn't screw the encoding of string labels"
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
svc = LinearSVC()
ovo = OneVsOneClassifier(svc)
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb, X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
if __name__ == "__main__":
import nose
nose.runmodule()
|
bsd-3-clause
|
0x0all/scikit-learn
|
sklearn/linear_model/randomized_l1.py
|
4
|
23089
|
"""
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy of Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
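# Illustrative usage sketch for the stability-selection estimators built on this
# base class (the data here is synthetic and purely for illustration):
#
#   from sklearn.datasets import make_regression
#   X, y = make_regression(n_samples=50, n_features=20, n_informative=3,
#                          random_state=0)
#   rl = RandomizedLasso(alpha=0.025, random_state=0).fit(X, y)
#   print(rl.scores_)        # selection frequency per feature, in [0, 1]
#   print(rl.get_support())  # boolean mask of features above selection_threshold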
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
    Randomized Lasso works by resampling the training data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article; that alpha corresponds to the ``scaling`` parameter here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
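# Illustrative usage sketch (not part of the original module; the helper name
# and the synthetic data below are assumptions, shown only to make the typical
# stability-selection workflow concrete):
def _example_randomized_lasso():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = rng.randn(60, 10)
    # Only the first two features carry signal.
    y = X[:, 0] - 2 * X[:, 1] + 0.01 * rng.randn(60)
    model = RandomizedLasso(alpha='aic', random_state=0)
    model.fit(X, y)
    # scores_ holds per-feature selection frequencies in [0, 1]; features with
    # a score above selection_threshold are reported by get_support().
    return model.scores_, model.get_support()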
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the training data and
    computing a LogisticRegression on each resampling. In short, the features
    selected more often are good features. It is also known as stability
    selection.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
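# Illustrative usage sketch (not part of the original module; the helper name
# and the synthetic data below are assumptions):
def _example_randomized_logistic():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = rng.randn(80, 6)
    # The labels depend only on the first feature.
    y = (X[:, 0] > 0).astype(int)
    model = RandomizedLogisticRegression(C=1.0, random_state=0)
    model.fit(X, y)
    # transform() keeps the columns whose stability score exceeds
    # selection_threshold (0.25 by default).
    return model.scores_, model.transform(X).shape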
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
                         verbose=False):
    """Stability path based on randomized Lasso estimates
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
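# Illustrative usage sketch (not part of the original module; the helper name,
# synthetic data and parameter values below are assumptions):
def _example_lasso_stability_path():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] - 2 * X[:, 1] + 0.01 * rng.randn(60)
    alphas_grid, scores_path = lasso_stability_path(X, y, random_state=0,
                                                    n_resampling=50)
    # scores_path[i, j] is the fraction of resamplings in which feature i
    # was selected at the grid point alphas_grid[j].
    return alphas_grid, scores_path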
|
bsd-3-clause
|
mrocklin/into
|
into/backends/hdfstore.py
|
1
|
3041
|
from __future__ import absolute_import, division, print_function
import pandas as pd
import datashape
from datashape import discover
from ..append import append
from ..convert import convert, ooc_types
from ..chunks import chunks, Chunks
from ..resource import resource
HDFDataset = (pd.io.pytables.AppendableFrameTable, pd.io.pytables.FrameFixed)
@discover.register(pd.HDFStore)
def discover_hdfstore(f):
d = dict()
for key in f.keys():
d2 = d
key2 = key.lstrip('/')
while '/' in key2:
group, key2 = key2.split('/', 1)
if group not in d2:
d2[group] = dict()
d2 = d2[group]
d2[key2] = f.get_storer(key)
return discover(d)
@discover.register(pd.io.pytables.Fixed)
def discover_hdfstore_storer(storer):
f = storer.parent
n = storer.shape
if isinstance(n, list):
n = n[0]
measure = discover(f.select(storer.pathname, start=0, stop=10)).measure
return n * measure
@convert.register(chunks(pd.DataFrame), pd.io.pytables.AppendableFrameTable)
def hdfstore_to_chunks_dataframes(data, chunksize=1000000, **kwargs):
return chunks(pd.DataFrame)(data.parent.select(data.pathname, chunksize=chunksize))
@convert.register(pd.DataFrame, (pd.io.pytables.AppendableFrameTable,
pd.io.pytables.FrameFixed))
def hdfstore_to_dataframe(data, **kwargs):
return data.read()
from collections import namedtuple
EmptyHDFStoreDataset = namedtuple('EmptyHDFStoreDataset', 'parent,pathname,dshape')
@resource.register('hdfstore://.+', priority=11)
def resource_hdfstore(uri, datapath=None, dshape=None, **kwargs):
# TODO:
# 1. Support nested datashapes (e.g. groups)
# 2. Try translating unicode to ascii? (PyTables fails here)
fn = uri.split('://')[1]
f = pd.HDFStore(fn)
if dshape is None:
if datapath:
return f.get_storer(datapath)
else:
return f
dshape = datashape.dshape(dshape)
# Already exists, return it
if datapath in f:
return f.get_storer(datapath)
    # Need to create a new dataset.
# HDFStore doesn't support empty datasets, so we use a proxy object.
return EmptyHDFStoreDataset(f, datapath, dshape)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset), pd.DataFrame)
def append_dataframe_to_hdfstore(store, df, **kwargs):
store.parent.append(store.pathname, df, append=True)
return store.parent.get_storer(store.pathname)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset),
chunks(pd.DataFrame))
def append_chunks_dataframe_to_hdfstore(store, c, **kwargs):
parent = store.parent
for chunk in c:
parent.append(store.pathname, chunk)
return parent.get_storer(store.pathname)
@append.register((pd.io.pytables.Fixed, EmptyHDFStoreDataset), object)
def append_object_to_hdfstore(store, o, **kwargs):
return append(store, convert(chunks(pd.DataFrame), o, **kwargs), **kwargs)
ooc_types |= set(HDFDataset)
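# Illustrative usage sketch (not part of the original backend; the helper
# name, file name and sample data are assumptions, and PyTables must be
# installed for pd.HDFStore to work):
def _example_hdfstore_roundtrip():  # pragma: no cover
    import os
    import tempfile
    df = pd.DataFrame({'x': [1, 2, 3], 'y': [1.0, 2.0, 3.0]})
    fn = os.path.join(tempfile.mkdtemp(), 'example.h5')
    # resource() returns an EmptyHDFStoreDataset proxy for a datapath that
    # does not exist yet.
    target = resource('hdfstore://' + fn, datapath='/data',
                      dshape='var * {x: int64, y: float64}')
    # append() dispatches to append_dataframe_to_hdfstore() above and returns
    # the underlying PyTables storer.
    storer = append(target, df)
    return storer.nrows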
|
bsd-3-clause
|
asnorkin/sentiment_analysis
|
site/lib/python2.7/site-packages/sklearn/datasets/tests/test_base.py
|
5
|
8862
|
import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
X_y_tuple = load_digits(return_X_y=True)
bunch = load_digits()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
# test return_X_y option
X_y_tuple = load_diabetes(return_X_y=True)
bunch = load_diabetes()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_linnerud(return_X_y=True)
bunch = load_linnerud()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_iris(return_X_y=True)
bunch = load_iris()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_breast_cancer(return_X_y=True)
bunch = load_breast_cancer()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_boston(return_X_y=True)
bunch = load_boston()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
    # is a surprising behaviour because reading bunch.key uses
    # bunch.__dict__ (which is non-empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
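# Note (hedged): these tests rely on the nose runner -- the @with_setup
# decorators come from nose via sklearn.utils.testing -- so they are meant to
# be collected by ``nosetests`` rather than executed directly as a script.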
|
mit
|
spbguru/repo1
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py
|
69
|
15397
|
from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
"""Show all the figures and enter the Cocoa mainloop.
This function will not return until all windows are closed or
the interpreter exits."""
# Having a Python-level function "show" wrapping the built-in
# function "show" in the _macosx extension module allows us to
    # add attributes to "show". This is something ipython does.
_macosx.show()
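# Illustrative usage sketch (hedged): this backend is normally selected via
# matplotlib's backend machinery rather than imported directly, e.g.
#
#     import matplotlib
#     matplotlib.use('MacOSX')
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1, 2], [0, 1, 4])
#     plt.show()   # enters the Cocoa mainloop implemented above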
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
    methods forward the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_path(path, transform, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)
def draw_path_collection(self, *args):
gc = self.gc
args = args[:13]
gc.draw_path_collection(*args)
def draw_quad_mesh(self, *args):
gc = self.gc
gc.draw_quad_mesh(*args)
def new_gc(self):
self.gc.reset()
return self.gc
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key) # Not sure what this does; just copied from backend_agg.py
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
size = prop.get_size_in_points()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# TODO: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: handle descent; This is based on backend_agg.py
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_foreground(self, fg, isRGB=False):
if not isRGB:
fg = colorConverter.to_rgb(fg)
_macosx.GraphicsContext.set_foreground(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', **kwargs):
if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
filename = unicode(filename)
root, ext = os.path.splitext(filename)
ext = ext[1:].lower()
if not ext:
ext = "png"
filename = root + "." + ext
if ext=="jpg": ext = "jpeg"
# save the figure settings
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
# set the new parameters
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
self.write_bitmap(filename, width, height)
elif ext == 'pdf':
self.write_pdf(filename)
elif ext in ('ps', 'eps'):
from backend_ps import FigureCanvasPS
# Postscript backend changes figure.dpi, but doesn't change it back
origDPI = self.figure.dpi
fc = self.switch_backends(FigureCanvasPS)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.dpi = origDPI
self.figure.set_canvas(self)
elif ext=='svg':
from backend_svg import FigureCanvasSVG
fc = self.switch_backends(FigureCanvasSVG)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.set_canvas(self)
else:
raise ValueError("Figure format not available (extension %s)" % ext)
# restore original figure settings
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbarMac(canvas)
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# This is ugly, but this is what tkagg and gtk are doing.
# It is needed to get ginput() working.
self.canvas.figure.show = lambda *args: self.show()
def show(self):
self.canvas.draw()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
def __init__(self, canvas):
self.canvas = canvas
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
images = {}
for imagename in ("stock_left",
"stock_right",
"stock_up",
"stock_down",
"stock_zoom-in",
"stock_zoom-out",
"stock_save_as"):
filename = os.path.join(basedir, imagename+".ppm")
images[imagename] = self._read_ppm_image(filename)
_macosx.NavigationToolbar.__init__(self, images)
self.message = None
def _read_ppm_image(self, filename):
data = ""
imagefile = open(filename)
for line in imagefile:
if "#" in line:
i = line.index("#")
line = line[:i] + "\n"
data += line
imagefile.close()
magic, width, height, maxcolor, imagedata = data.split(None, 4)
width, height = int(width), int(height)
assert magic=="P6"
assert len(imagedata)==width*height*3 # 3 colors in RGB
return (width, height, imagedata)
def panx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.pan(direction)
self.canvas.invalidate()
def pany(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.pan(direction)
self.canvas.invalidate()
def zoomx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.zoom(direction)
self.canvas.invalidate()
def zoomy(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.zoom(direction)
self.canvas.invalidate()
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(x0, y0, x1, y1)
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerMac
|
gpl-3.0
|
mikaem/spectralDNS
|
sandbox/cheb_biharmonic.py
|
2
|
5718
|
from numpy.polynomial import chebyshev as n_cheb
from sympy import chebyshevt, Symbol, sin, cos, pi, exp, lambdify, sqrt as Sqrt
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve_banded, lu_factor, lu_solve
from scipy.sparse import diags
import scipy.sparse.linalg as la
from spectralDNS.shen.shentransform import ShenBiharmonicBasis
from spectralDNS.shen.Matrices import ABBmat, BBBmat, SBBmat
from spectralDNS.shen.la import Biharmonic
from scipy.linalg import solve
r"""
Solve biharmonic equation on (-1, 1)
\nabla^4 u - a \nabla^2 u + b u = f, u(\pm 1) = u'(\pm 1) = 0
where a and b are some integer wavenumbers.
The equation to be solved for is
(\nabla^4 u, \phi_k)_w - a(\nabla^2 u, \phi_k)_w + b(u, \phi_k)_w = (f, \phi_k)_w
(A - aC + bB) u = f
"""
# Use sympy to compute a rhs, given an analytical solution
x = Symbol("x")
u = (1-x**2)*sin(8*pi*x)*cos(4*pi*x)
N = 256
k = 2*N
nu = 1./590.
dt = 5e-5
a = -(k**2+nu*dt/2*k**4)*0
#b = (1.+nu*dt*k**2)
b = 1
c = -nu*dt/2.*0
f = a*u.diff(x, 4) + b*u.diff(x, 2) + c*u
SD = ShenBiharmonicBasis("GC", True)
points, weights = SD.points_and_weights(N)
uj = np.array([u.subs(x, j) for j in points], dtype=float)
fj = np.array([f.subs(x, j) for j in points], dtype=float) # Get f on quad points
#uj_hat = np.zeros(N)
#uj_hat = SD.fst(uj, uj_hat)
#uj = SD.ifst(uj_hat, uj)
#fj_hat = np.zeros(N)
#fj_hat = SD.fst(fj, fj_hat)
#fj = SD.ifst(fj_hat, fj)
solver = Biharmonic(N, a, b, c, quad=SD.quad, solver="cython")
solver2 = Biharmonic(N, a, b, c, quad=SD.quad)
f_hat = np.zeros(N)
f_hat = SD.fastShenScalar(fj, f_hat)
u_hat = np.zeros(N)
u_hat2 = np.zeros(N)
from time import time
t0 = time()
u_hat = solver(u_hat, f_hat)
t1 = time()
u_hat2 = solver2(u_hat2, f_hat)
t2 = time()
print "cython / scipy ", t1-t0, t2-t1
u1 = np.zeros(N)
u1 = SD.ifst(u_hat, u1)
fr = np.random.randn(N)
fr_hat = np.zeros(N)
fr_hat = SD.fastShenScalar(fr, fr_hat)
ur_hat = np.zeros(N)
ur_hat2 = np.zeros(N)
ur_hat2 = solver2(ur_hat2, fr_hat)
ur_hat = solver(ur_hat, fr_hat)
c0 = np.zeros(N)
c0 = solver.matvec(ur_hat, c0)
print np.sqrt(sum((c0-fr_hat)**2)/N), max(abs(c0-fr_hat))/max(abs(fr_hat))
c1 = np.zeros(N)
c1 = solver2.matvec(ur_hat2, c1)
print np.sqrt(sum((c1-fr_hat)**2)/N), max(abs(c1-fr_hat))/max(abs(fr_hat))
#fr = SD.ifst(fr_hat, fr)
#fr_hat = SD.fst(fr, fr_hat)
#fr2 = SD.ifst(fr_hat, fr2)
#assert np.allclose(fr2, fr)
print np.sqrt(sum((u1-uj)**2)/N), max(abs(u1-uj))/max(abs(uj))
cc = np.zeros(N)
cc = solver.matvec(u_hat, cc)
print np.sqrt(sum((cc-f_hat)**2)/N), max(abs(cc-f_hat))/max(abs(f_hat))
assert np.allclose(u1, uj)
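# Note (hedged): the checks above exercise both solver backends on the same
# right-hand side -- solver.matvec(u_hat, ...) should reproduce f_hat, and the
# back-transformed solution u1 should match the analytical uj on the Chebyshev
# quadrature points.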
##alfa = np.ones((4,4))
##beta = np.ones((4,4))
##solver = Biharmonic(N, -1, alfa, beta, quad=SD.quad)
##f_hat = f_hat.repeat(16).reshape((N, 4, 4))+f_hat.repeat(16).reshape((N, 4, 4))*1j
##u_hat = u_hat.repeat(16).reshape((N, 4, 4))+u_hat.repeat(16).reshape((N, 4, 4))*1j
##u_hat = solver(u_hat, f_hat)
##u1 = np.zeros((N, 4, 4), dtype=complex)
##u1 = SD.ifst(u_hat, u1)
##uj = uj.repeat(16).reshape((N, 4, 4)) + 1j*uj.repeat(16).reshape((N, 4, 4))
##assert np.allclose(u1, uj)
#from spectralDNS.shen.SFTc import *
#sii, siu, siuu = solver.S.dd, solver.S.ud[0], solver.S.ud[1]
#ail, aii, aiu = solver.A.ld, solver.A.dd, solver.A.ud
#bill, bil, bii, biu, biuu = solver.B.lld, solver.B.ld, solver.B.dd, solver.B.ud, solver.B.uud
#M = sii[::2].shape[0]
#u0 = np.zeros((2, M), float) # Diagonal entries of U
#u1 = np.zeros((2, M-1), float) # Diagonal+1 entries of U
#u2 = np.zeros((2, M-2), float) # Diagonal+2 entries of U
#l0 = np.zeros((2, M-1), float) # Diagonal-1 entries of L
#l1 = np.zeros((2, M-2), float) # Diagonal-2 entries of L
##d = 1e-6*sii + a*aii + b*bii
#d = np.ones(N-4)
#LU_Biharmonic_1D(a, b, c, sii, siu, siuu, ail, aii, aiu, bill, bil, bii, biu, biuu, u0, u1, u2, l0, l1)
#uk = np.zeros(N)
##u0[0] = solver.Le[0].diagonal(0)
##u0[1] = solver.Lo[0].diagonal(0)
##u1[0] = solver.Le[0].diagonal(1)
##u1[1] = solver.Lo[0].diagonal(1)
#AA = a*solver.S.diags().toarray() + b*solver.A.diags().toarray() + c*solver.B.diags().toarray()
##de = np.eye(N-4)/d
##AA = np.dot(de, AA)
##fr_hat[:-4] = fr_hat[:-4] / d
#U = np.zeros((2, M, M), float)
#ll0 = np.zeros((2, M-1), float) # Diagonal-1 entries of L
#ll1 = np.zeros((2, M-2), float) # Diagonal-2 entries of L
#ukk = np.zeros(N)
#uk2 = np.zeros(N)
#LUC_Biharmonic_1D(AA, U, ll0, ll1)
#Solve_LUC_Biharmonic_1D(fr_hat, ukk, U, ll0, ll1, 0)
#Le = diags([ll1[0], ll0[0], np.ones(M)], [-2, -1, 0]).toarray()
#assert np.allclose(np.dot(Le, U[0]), AA[::2, ::2])
#Lo = diags([ll1[1], ll0[1], np.ones(M)], [-2, -1, 0]).toarray()
#assert np.allclose(np.dot(Lo, U[1]), AA[1::2, 1::2])
#ak = np.zeros((2, M), float)
#bk = np.zeros((2, M), float)
#Biharmonic_factor_pr(ak, bk, l0, l1)
#Solve_Biharmonic_1D(fr_hat, uk, u0, u1, u2, l0, l1, ak, bk, a)
##ff = fr_hat.copy()
##ff[:-4] *= d
##u_hat = solver(u_hat, ff)
##Ae = AA[::2, ::2]
##u2 = (Ae.diagonal()[2:] - l0[0, 1:]*u1[0, 1:] - u0[0, 2:])/l1[0, :]
##print U[0].diagonal(2) - u2
##U[0].diagonal(3) - 1./l1[0, :-1] *(Ae.diagonal(1)[2:] - l0[0, 1:-1]*U[0].diagonal(2)[1:] - U[0].diagonal(1)[2:])
##from scipy.linalg import svd
##def my_cond(A):
##sigma = svd(A, full_matrices=False, compute_uv=False)
##sigma.sort()
##return sigma[-1]/sigma[0]
##print "Cond U = ", np.linalg.cond(U[0]), np.linalg.cond(U[1])
##print "Cond U = ", my_cond(U[0]), my_cond(U[1])
#Uc = U.copy()
#fc = fr_hat.copy()
#t0 = time()
#Solve_LUC_Biharmonic_1D(fc, uk2, Uc, ll0, ll1, 1)
#t1 = time()
#print t1-t0
##print "Cond U = ", np.linalg.cond(Uc[0]), np.linalg.cond(Uc[1])
##print "Cond U = ", my_cond(Uc[0]), my_cond(Uc[1])
|
gpl-3.0
|
nvoron23/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
198
|
29735
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
           random_state=None):
    """C_step procedure described in [Rouseeuw1999]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
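# Illustrative usage sketch (not part of the original estimator code; the
# helper name, contamination scheme and numbers are assumptions):
def _example_c_step():  # pragma: no cover
    rng = np.random.RandomState(42)
    X = rng.randn(100, 3)
    X[:5] += 10.0  # contaminate a few observations with outliers
    location, covariance, det, support, dist = c_step(
        X, n_support=70, random_state=rng)
    # `support` flags the 70 observations whose empirical scatter matrix has
    # the smallest determinant found by the iterative C-step procedure.
    return location, covariance, support.sum()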
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
        (2 is enough to get close to the final solution; in practice more
        than 20 iterations are rarely needed).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
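# Illustrative usage sketch (not part of the original estimator code; the
# helper name, data and numbers are assumptions):
def _example_select_candidates():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = rng.randn(200, 4)
    best_locs, best_covs, best_supports, best_ds = select_candidates(
        X, n_support=120, n_trials=10, select=3, n_iter=2, random_state=0)
    # Three candidate (location, covariance) pairs, ranked by the determinant
    # of their scatter matrices.
    return best_locs.shape, best_covs.shape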
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into a larger subset, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): keep only 2 candidates per subset.
            n_best_sub = 2
            n_best_tot = n_subsets * n_best_sub
            all_best_locations = np.zeros((n_best_tot, n_features))
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
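# Minimal usage sketch (illustrative only, not part of the public API):
# calling fast_mcd on toy Gaussian data with a few gross outliers. The toy
# values below are assumptions made purely for the example.
def _fast_mcd_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 2)
    X[:10] += 10.  # inject a handful of obvious outliers
    location, covariance, support, dist = fast_mcd(X, random_state=0)
    # `support` marks the observations used for the raw robust fit; the
    # injected outliers should typically fall outside of it, and their
    # entries of `dist` should be large.
    return location, covariance, support, dist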
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is almost, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        [n_samples + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1999]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
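# Minimal usage sketch (illustrative only): fitting MinCovDet on toy
# correlated Gaussian data. The toy parameters below are assumptions made
# purely for the example.
def _min_cov_det_sketch():
    rng = np.random.RandomState(42)
    X = rng.multivariate_normal([0., 0.], [[1., .7], [.7, 1.]], size=300)
    mcd = MinCovDet(random_state=42).fit(X)
    # raw_location_/raw_covariance_ hold the estimates before correction
    # and re-weighting; location_/covariance_ hold the final ones.
    return mcd.raw_covariance_, mcd.covariance_, mcd.support_.sum()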
|
bsd-3-clause
|
Adai0808/scikit-learn
|
sklearn/metrics/ranking.py
|
79
|
25426
|
"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
                                         sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
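# Minimal sketch (illustrative only) of what _binary_clf_curve returns on a
# tiny toy problem: cumulative false- and true-positive counts at each
# distinct score value, from the highest score downwards.
def _binary_clf_curve_sketch():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
    # thresholds -> [0.8, 0.4, 0.35, 0.1]
    # tps        -> [1, 1, 2, 2]
    # fps        -> [0, 1, 1, 2]
    return fps, tps, thresholds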
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
    have a corresponding threshold. This ensures that the graph starts on the
    y axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
    Ties in ``y_score`` are broken by giving the maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
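# Minimal usage sketch (illustrative only): for the toy values below the
# highest-ranked true label of the first sample is covered after 2 scores,
# the second sample's after 3, so the coverage error is 2.5.
def _coverage_error_sketch():
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.], [1., 0.2, 0.1]])
    return coverage_error(y_true, y_score)  # 2.5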
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # If the scores are sorted, the number of incorrectly ordered pairs
        # can be counted in linear time by cumulatively counting how many
        # false labels of a given score are ranked above the accumulated
        # true labels with lower scores.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those samples should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
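# Minimal usage sketch (illustrative only): with the toy values below the
# first sample has its single true label out-ranked by one of two false
# labels (loss 1/2) and the second sample's true label is ranked last
# (loss 2/2), giving an average ranking loss of 0.75.
def _label_ranking_loss_sketch():
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.], [1., 0.2, 0.1]])
    return label_ranking_loss(y_true, y_score)  # 0.75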
|
bsd-3-clause
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/doc/mpl_examples/pylab_examples/pythonic_matplotlib.py
|
9
|
2425
|
#!/usr/bin/env python
"""
Some people prefer to write more pythonic, object-oriented code
rather than use the pylab interface to matplotlib. This example shows
you how.
Unless you are an application developer, I recommend using part of the
pylab interface, particularly the figure, close, subplot, axes, and
show commands. These hide a lot of complexity from you that you don't
need to see in normal figure creation, like instantiating DPI
instances, managing the bounding boxes of the figure elements,
creating and realizing GUI windows and embedding figures in them.
If you are an application developer and want to embed matplotlib in
your application, follow the lead of examples/embedding_in_wx.py,
examples/embedding_in_gtk.py or examples/embedding_in_tk.py. In this
case you will want to control the creation of all your figures,
embedding them in application windows, etc.
If you are a web application developer, you may want to use the
example in webapp_demo.py, which shows how to use the backend agg
figure canvas directly, with none of the globals (current figure,
current axes) that are present in the pylab interface. Note that
there is no reason why the pylab interface won't work for web
application developers, however.
If you see an example in the examples dir written in the pylab interface,
and you want to emulate that using the true python method calls, there
is an easy mapping. Many of those examples use 'set' to control
figure properties. Here's how to map those commands onto instance
methods
The syntax of set is
setp(object or sequence, somestring, attribute)
if called with an object, set calls
object.set_somestring(attribute)
if called with a sequence, set does
for object in sequence:
object.set_somestring(attribute)
So for your example, if a is your axes object, you can do
a.set_xticklabels([])
a.set_yticklabels([])
a.set_xticks([])
a.set_yticks([])
"""
from pylab import figure, show
from numpy import arange, sin, pi
t = arange(0.0, 1.0, 0.01)
fig = figure(1)
ax1 = fig.add_subplot(211)
ax1.plot(t, sin(2*pi*t))
ax1.grid(True)
ax1.set_ylim( (-2,2) )
ax1.set_ylabel('1 Hz')
ax1.set_title('A sine wave or two')
for label in ax1.get_xticklabels():
label.set_color('r')
ax2 = fig.add_subplot(212)
ax2.plot(t, sin(2*2*pi*t))
ax2.grid(True)
ax2.set_ylim( (-2,2) )
l = ax2.set_xlabel('Hi mom')
l.set_color('g')
l.set_fontsize('large')
show()
|
mit
|
jm-begon/scikit-learn
|
examples/tree/plot_tree_regression.py
|
206
|
1476
|
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
kashif/scikit-learn
|
examples/cluster/plot_color_quantization.py
|
297
|
3443
|
# -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0, 1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
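# A vectorized sketch of the same reconstruction (illustrative alternative,
# same result as the loop above): fancy indexing gives an array of shape
# (w * h, d) which is reshaped back to (w, h, d).
def recreate_image_fast(codebook, labels, w, h):
    return codebook[labels].reshape(w, h, -1)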
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
|
bsd-3-clause
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/linear_model/least_angle.py
|
15
|
57254
|
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..model_selection import check_cv
from ..exceptions import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..externals.six import string_types
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
..). Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
            # and makes it easy to swap columns
X = X.copy('F')
elif isinstance(Gram, string_types) and Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
                # swap only works in place if the matrix is Fortran-
                # contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by
# the test suite. The `equality_tolerance` margin added in 0.16
# to get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
            # with a huge number of features, this takes 50% of the time; I
            # think it could be avoided if we just updated it using an
            # orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
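# Minimal usage sketch (illustrative only) of lars_path on toy data; the toy
# design matrix and sparse ground truth below are assumptions made purely
# for the example.
def _lars_path_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = np.dot(X[:, :3], [1., -2., 3.]) + 0.01 * rng.randn(50)
    alphas, active, coefs = lars_path(X, y, method='lasso')
    # coefs has shape (n_features, n_alphas + 1); the three informative
    # features should be the first ones to enter the active set.
    return alphas, active, coefs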
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
        set ``fit_intercept=False`` (it is True by default).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale = self._preprocess_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_offset, y_offset, X_scale)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
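# Hedged illustration (added, not part of the original scikit-learn source):
# a minimal sketch of calling the helper above on a single hand-made
# train/test split.  The data and the split indices are placeholders.
def _example_lars_path_residues():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    y = X[:, 0] - X[:, 2] + 0.1 * rng.randn(50)
    train, test = np.arange(40), np.arange(40, 50)
    alphas, active, coefs, residues = _lars_path_residues(
        X[train], y[train], X[test], y[test], method='lasso')
    # residues has shape (n_alphas, n_test_samples); squaring and averaging
    # over the sample axis gives the per-alpha left-out error that LarsCV
    # interpolates and minimizes below.
    return alphas, (residues ** 2).mean(axis=-1)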
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
    cv_mse_path_ : array, shape (n_cv_alphas, n_folds)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv.split(X, y))
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is a LassoLarsCV,
        # as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
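# Hedged illustration (added, not part of the original scikit-learn source):
# a minimal usage sketch for LarsCV on synthetic data, relying only on the
# fit/attribute API documented above.
def _example_larscv():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(80, 10)
    y = X[:, 1] + 2 * X[:, 4] + 0.1 * rng.randn(80)
    model = LarsCV(cv=5).fit(X, y)
    # alpha_ is the value minimizing the averaged left-out error; the error
    # along cv_alphas_ for each fold is kept in cv_mse_path_.
    return model.alpha_, model.coef_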
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
    cv_mse_path_ : array, shape (n_cv_alphas, n_folds)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
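# Hedged illustration (added, not part of the original scikit-learn source):
# LassoLarsCV only switches the path method to 'lasso'; a minimal sketch on a
# synthetic problem with a genuinely sparse ground truth.
def _example_lassolarscv():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 20)
    true_coef = np.zeros(20)
    true_coef[:3] = [1.0, -2.0, 0.5]   # only three informative features
    y = np.dot(X, true_coef) + 0.1 * rng.randn(100)
    model = LassoLarsCV(cv=5).fit(X, y)
    # With a sparse ground truth most entries of coef_ should be (near) zero,
    # and alpha_ is the regularization level selected by cross-validation.
    return model.alpha_, np.flatnonzero(model.coef_)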
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
https://en.wikipedia.org/wiki/Akaike_information_criterion
https://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
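# Hedged illustration (added, not part of the original scikit-learn source):
# a minimal sketch of inspecting the information criterion computed by the
# fit method above.
def _example_lassolarsic():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 10)
    y = X[:, 0] - 3 * X[:, 5] + 0.1 * rng.randn(100)
    model = LassoLarsIC(criterion='bic').fit(X, y)
    # criterion_ holds n_samples * log(mse) + K * df for every alpha on the
    # path, with K = log(n_samples) for 'bic' (2 for 'aic'); alpha_ is the
    # value at its minimum.
    return model.alpha_, model.criterion_.argmin()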
|
mit
|
zorojean/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
230
|
2823
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
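# Hedged illustration (added, not part of the original test suite): a minimal
# sketch of the metric on clearly separated synthetic clusters, where the
# score should be close to 1.
def _example_silhouette_on_blobs():
    from sklearn.datasets import make_blobs
    X, labels = make_blobs(n_samples=60, centers=3, cluster_std=0.3,
                           random_state=0)
    score = silhouette_score(X, labels, metric='euclidean')
    assert score > 0.5
    return score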
|
bsd-3-clause
|
aashish24/seaborn
|
seaborn/distributions.py
|
1
|
80369
|
"""Plotting functions for visualizing distributions."""
from __future__ import division
from textwrap import dedent
import colorsys
import numpy as np
from scipy import stats
import pandas as pd
from pandas.core.series import remove_na
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
try:
import statsmodels.nonparametric.api as smnp
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .external.six.moves import range
from .utils import set_hls_values, desaturate, iqr, _kde_support
from .palettes import color_palette, husl_palette, blend_palette, light_palette
from .axisgrid import JointGrid
class _BoxPlotter(object):
def __init__(self, x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.width = width
self.fliersize = fliersize
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def establish_variables(self, x=None, y=None, hue=None, data=None,
orient=None, order=None, hue_order=None):
"""Convert input specification into a common representation."""
# Option 1:
# We are plotting a wide-form dataset
# -----------------------------------
if x is None and y is None:
# Do a sanity check on the inputs
if hue is not None:
error = "Cannot use `hue` without `x` or `y`"
raise ValueError(error)
# No hue grouping with wide inputs
plot_hues = None
hue_title = None
hue_names = None
            # We also won't get axes labels here
value_label = None
group_label = None
# Option 1a:
# The input data is a Pandas DataFrame
# ------------------------------------
if isinstance(data, pd.DataFrame):
# Order the data correctly
if order is None:
order = []
# Reduce to just numeric columns
for col in data:
try:
data[col].astype(np.float)
order.append(col)
except ValueError:
pass
plot_data = data[order]
group_names = order
group_label = data.columns.name
# Convert to a list of arrays, the common representation
iter_data = plot_data.iteritems()
plot_data = [np.asarray(s, np.float) for k, s in iter_data]
# Option 1b:
# The input data is an array or list
# ----------------------------------
else:
# We can't reorder the data
if order is not None:
error = "Input data must be a pandas object to reorder"
raise ValueError(error)
# The input data is an array
if hasattr(data, "shape"):
if len(data.shape) == 1:
if np.isscalar(data[0]):
plot_data = [data]
else:
plot_data = list(data)
elif len(data.shape) == 2:
nr, nc = data.shape
if nr == 1 or nc == 1:
plot_data = [data.ravel()]
else:
plot_data = [data[:, i] for i in range(nc)]
else:
error = ("Input `data` can have no "
"more than 2 dimensions")
raise ValueError(error)
# Check if `data` is None to let us bail out here (for testing)
elif data is None:
plot_data = [[]]
# The input data is a flat list
elif np.isscalar(data[0]):
plot_data = [data]
# The input data is a nested list
# This will catch some things that might fail later
# but exhaustive checks are hard
else:
plot_data = data
# Convert to a list of arrays, the common representation
plot_data = [np.asarray(d, np.float) for d in plot_data]
# The group names will just be numeric indices
group_names = list(range((len(plot_data))))
# Figure out the plotting orientation
orient = "h" if str(orient).startswith("h") else "v"
# Option 2:
# We are plotting a long-form dataset
# -----------------------------------
else:
# See if we need to get `x` and `y` or `hue` from `data`
if data is not None:
x = data.get(x, x)
y = data.get(y, y)
hue = data.get(hue, hue)
# Figure out the plotting orientation
orient = self.infer_orient(x, y, orient)
# Option 2a:
# We are plotting a single set of data
# ------------------------------------
if x is None or y is None:
# Determine where the data are
vals = y if x is None else x
# Put them into the common representation
plot_data = [np.asarray(vals)]
# Get a label for the value axis
if hasattr(vals, "name"):
value_label = vals.name
else:
value_label = None
# This plot will not have group labels or hue nesting
groups = None
group_label = None
group_names = []
plot_hues = None
hue_names = None
hue_title = None
# Option 2b:
# We are grouping the data values by another variable
# ---------------------------------------------------
else:
# Determine which role each variable will play
if orient == "v":
vals, groups = y, x
else:
vals, groups = x, y
# Make sure the groupby is going to work
if not isinstance(vals, pd.Series):
vals = pd.Series(vals)
# Get the order of the box groups
if order is None:
try:
order = groups.unique()
except AttributeError:
order = pd.unique(groups)
group_names = list(order)
# Group the numeric data
grouped_vals = vals.groupby(groups)
plot_data = [grouped_vals.get_group(g) for g in order]
plot_data = [d.values for d in plot_data]
# Get the categorical axis label
if hasattr(groups, "name"):
group_label = groups.name
else:
group_label = None
# Get the numerical axis label
value_label = vals.name
# Now handle the hue levels for nested ordering
if hue is None:
plot_hues = None
hue_title = None
hue_names = None
else:
# Make sure the groupby is going to work
if not isinstance(hue, pd.Series):
hue = pd.Series(hue)
# Get the order of the hue levels
if hue_order is None:
try:
hue_order = hue.unique()
except AttributeError:
hue_order = pd.unique(hue)
hue_names = list(hue_order)
# Group the hue categories
grouped_hues = hue.groupby(groups)
plot_hues = [grouped_hues.get_group(g) for g in order]
plot_hues = [h.values for h in plot_hues]
# Get the title for the hues (will title the legend)
hue_title = hue.name
# Assign object attributes
# ------------------------
self.orient = orient
self.plot_data = plot_data
self.group_label = group_label
self.value_label = value_label
self.group_names = group_names
self.plot_hues = plot_hues
self.hue_title = hue_title
self.hue_names = hue_names
def establish_colors(self, color, palette, saturation):
"""Get a list of colors for the main component of the plots."""
if self.hue_names is None:
n_colors = len(self.plot_data)
else:
n_colors = len(self.hue_names)
# Determine the main colors
if color is None and palette is None:
# Determine whether the current palette will have enough values
# If not, we'll default to the husl palette so each is distinct
current_palette = mpl.rcParams["axes.color_cycle"]
if n_colors <= len(current_palette):
colors = color_palette(n_colors=n_colors)
else:
colors = husl_palette(n_colors, l=.7)
elif palette is None:
# When passing a specific color, the interpretation depends
# on whether there is a hue variable or not.
# If so, we will make a blend palette so that the different
# levels have some amount of variation.
if self.hue_names is None:
colors = [color] * n_colors
else:
colors = light_palette(color, n_colors)
else:
# Let `palette` be a dict mapping level to color
if isinstance(palette, dict):
if self.hue_names is None:
levels = self.group_names
else:
levels = self.hue_names
palette = [palette[l] for l in levels]
colors = color_palette(palette, n_colors)
        # Convert the colors to a common RGB representation
colors = [mpl.colors.colorConverter.to_rgb(c) for c in colors]
# Desaturate a bit because these are patches
if saturation < 1:
colors = [desaturate(c, saturation) for c in colors]
# Determine the gray color to use for the lines framing the plot
light_vals = [colorsys.rgb_to_hls(*c)[1] for c in colors]
l = min(light_vals) * .6
gray = (l, l, l)
# Assign object attributes
self.colors = colors
self.gray = gray
def infer_orient(self, x, y, orient=None):
"""Determine how the plot should be oriented based on the data."""
orient = str(orient)
def is_categorical(s):
try:
# Correct way, but doesn't exist in older Pandas
return pd.core.common.is_categorical_dtype(s)
except AttributeError:
# Also works, but feels hackier
return str(s.dtype) == "categorical"
if orient.startswith("v"):
return "v"
elif orient.startswith("h"):
return "h"
elif x is None:
return "v"
elif y is None:
return "h"
elif is_categorical(y):
return "h"
else:
return "v"
@property
def hue_offsets(self):
"""A list of center positions for plots when hue nesting is used."""
n_levels = len(self.hue_names)
each_width = self.width / n_levels
offsets = np.linspace(0, self.width - each_width, n_levels)
offsets -= offsets.mean()
return offsets
@property
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
return self.width / len(self.hue_names) * .98
def annotate_axes(self, ax):
"""Add descriptive labels to an Axes object."""
if self.orient == "v":
xlabel, ylabel = self.group_label, self.value_label
else:
xlabel, ylabel = self.value_label, self.group_label
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if self.orient == "v":
ax.set_xticks(np.arange(len(self.plot_data)))
ax.set_xticklabels(self.group_names)
else:
ax.set_yticks(np.arange(len(self.plot_data)))
ax.set_yticklabels(self.group_names)
if self.orient == "v":
ax.xaxis.grid(False)
ax.set_xlim(-.5, len(self.plot_data) - .5)
else:
ax.yaxis.grid(False)
ax.set_ylim(-.5, len(self.plot_data) - .5)
if self.hue_names is not None:
leg = ax.legend(loc="best")
if self.hue_title is not None:
leg.set_title(self.hue_title)
# Set the title size a roundabout way to maintain
                # compatibility with matplotlib 1.1
try:
title_size = mpl.rcParams["axes.labelsize"] * .85
except TypeError: # labelsize is something like "large"
title_size = mpl.rcParams["axes.labelsize"]
prop = mpl.font_manager.FontProperties(size=title_size)
leg._legend_title_box._text.set_font_properties(prop)
def restyle_boxplot(self, artist_dict, color):
"""Take a drawn matplotlib boxplot and make it look nice."""
for box in artist_dict["boxes"]:
box.set_color(color)
box.set_zorder(.9)
box.set_edgecolor(self.gray)
box.set_linewidth(self.linewidth)
for whisk in artist_dict["whiskers"]:
whisk.set_color(self.gray)
whisk.set_linewidth(self.linewidth)
whisk.set_linestyle("-")
for cap in artist_dict["caps"]:
cap.set_color(self.gray)
cap.set_linewidth(self.linewidth)
for med in artist_dict["medians"]:
med.set_color(self.gray)
med.set_linewidth(self.linewidth)
for fly in artist_dict["fliers"]:
fly.set_color(self.gray)
fly.set_marker("d")
fly.set_markeredgecolor(self.gray)
fly.set_markersize(self.fliersize)
def add_legend_data(self, ax, x, y, color, label):
"""Add a dummy patch object so we can get legend data."""
rect = plt.Rectangle([x, y], 0, 0,
linewidth=self.linewidth / 2,
edgecolor=self.gray,
facecolor=color,
label=label, zorder=-1)
ax.add_patch(rect)
def draw_boxplot(self, ax, kws):
"""Use matplotlib to draw a boxplot on an Axes."""
vert = self.orient == "v"
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
# Draw a single box or a set of boxes
# with a single level of grouping
box_data = remove_na(group_data)
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[i],
widths=self.width,
**kws)
color = self.colors[i]
self.restyle_boxplot(artist_dict, color)
else:
# Draw nested groups of boxes
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
if not hue_mask.any():
continue
box_data = remove_na(group_data[hue_mask])
center = i + offsets[j]
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[center],
widths=self.nested_width,
**kws)
color = self.colors[j]
self.restyle_boxplot(artist_dict, color)
# Add legend data, but just for one set of boxes
if not i:
self.add_legend_data(ax, center,
np.median(box_data),
color, hue_level)
def plot(self, ax, boxplot_kws):
"""Make the plot."""
self.draw_boxplot(ax, boxplot_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
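# Hedged illustration (added, not part of the original seaborn source): a
# minimal sketch of driving the internal plotter above directly, mirroring
# what the public boxplot() wrapper is expected to pass through.  All
# argument values here are placeholders.
def _example_boxplotter():
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    df = pd.DataFrame({"group": np.repeat(["a", "b", "c"], 30),
                       "value": rng.randn(90)})
    plotter = _BoxPlotter(x="group", y="value", hue=None, data=df,
                          order=None, hue_order=None, orient=None,
                          color=None, palette=None, saturation=.75,
                          width=.8, fliersize=5, linewidth=None)
    fig, ax = plt.subplots()
    plotter.plot(ax, {})
    return ax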
class _ViolinPlotter(_BoxPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.estimate_densities(bw, cut, scale, scale_hue, gridsize)
self.gridsize = gridsize
self.width = width
self.inner = inner
if split and self.hue_names is not None and len(self.hue_names) != 2:
raise ValueError("Cannot use `split` with more than 2 hue levels.")
self.split = split
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
"""Find the support and density for all of the data."""
# Initialize data structures to keep track of plotting data
if self.hue_names is None:
support = []
density = []
counts = np.zeros(len(self.plot_data))
max_density = np.zeros(len(self.plot_data))
else:
support = [[] for _ in self.plot_data]
density = [[] for _ in self.plot_data]
size = len(self.group_names), len(self.hue_names)
counts = np.zeros(size)
max_density = np.zeros(size)
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
# Strip missing datapoints
kde_data = remove_na(group_data)
# Handle special case of no data at this level
if kde_data.size == 0:
support.append(np.array([]))
density.append(np.array([1.]))
counts[i] = 0
max_density[i] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support.append(np.unique(kde_data))
density.append(np.array([1.]))
counts[i] = 1
max_density[i] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
density_i = kde.evaluate(support_i)
# Update the data structures with these results
support.append(support_i)
density.append(density_i)
counts[i] = kde_data.size
max_density[i] = density_i.max()
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
# Select out the observations for this hue level
hue_mask = self.plot_hues[i] == hue_level
# Strip missing datapoints
kde_data = remove_na(group_data[hue_mask])
# Handle special case of no data at this level
if kde_data.size == 0:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support[i].append(np.unique(kde_data))
density[i].append(np.array([1.]))
counts[i, j] = 1
max_density[i, j] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_ij = self.kde_support(kde_data, bw_used,
cut, gridsize)
density_ij = kde.evaluate(support_ij)
# Update the data structures with these results
support[i].append(support_ij)
density[i].append(density_ij)
counts[i, j] = kde_data.size
max_density[i, j] = density_ij.max()
# Scale the height of the density curve.
# For a violinplot the density is non-quantitative.
# The objective here is to scale the curves relative to 1 so that
# they can be multiplied by the width parameter during plotting.
if scale == "area":
self.scale_area(density, max_density, scale_hue)
elif scale == "width":
self.scale_width(density)
elif scale == "count":
self.scale_count(density, counts, scale_hue)
else:
raise ValueError("scale method '{}' not recognized".format(scale))
# Set object attributes that will be used while plotting
self.support = support
self.density = density
def fit_kde(self, x, bw):
"""Estimate a KDE for a vector of data with flexible bandwidth."""
# Allow for the use of old scipy where `bw` is fixed
try:
kde = stats.gaussian_kde(x, bw)
except TypeError:
kde = stats.gaussian_kde(x)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
# Extract the numeric bandwidth from the KDE object
bw_used = kde.factor
# At this point, bw will be a numeric scale factor.
        # To get the actual bandwidth of the kernel, we multiply by the
# unbiased standard deviation of the data, which we will use
# elsewhere to compute the range of the support.
bw_used = bw_used * x.std(ddof=1)
return kde, bw_used
def kde_support(self, x, bw, cut, gridsize):
"""Define a grid of support for the violin."""
support_min = x.min() - bw * cut
support_max = x.max() + bw * cut
return np.linspace(support_min, support_max, gridsize)
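    # Added commentary (hedged, not part of the original source): as a worked
    # example of the two methods above, for a roughly standard-normal sample
    # of size 100 gaussian_kde's default Scott factor is 100 ** (-1. / 5),
    # about 0.40, so fit_kde returns an effective bandwidth near 0.4 and
    # kde_support with cut=2 extends the grid about 0.8 beyond the observed
    # minimum and maximum of the data.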
def scale_area(self, density, max_density, scale_hue):
"""Scale the relative area under the KDE curve.
This essentially preserves the "standard" KDE scaling, but the
resulting maximum density will be 1 so that the curve can be
properly multiplied by the violin width.
"""
if self.hue_names is None:
for d in density:
if d.size > 1:
d /= max_density.max()
else:
for i, group in enumerate(density):
for d in group:
if scale_hue:
max = max_density[i].max()
else:
max = max_density.max()
if d.size > 1:
d /= max
def scale_width(self, density):
"""Scale each density curve to the same height."""
if self.hue_names is None:
for d in density:
d /= d.max()
else:
for group in density:
for d in group:
d /= d.max()
def scale_count(self, density, counts, scale_hue):
"""Scale each density curve by the number of observations."""
if self.hue_names is None:
for count, d in zip(counts, density):
d /= d.max()
d *= count / counts.max()
else:
for i, group in enumerate(density):
for j, d in enumerate(group):
count = counts[i, j]
if scale_hue:
scaler = count / counts[i].max()
else:
scaler = count / counts.max()
d /= d.max()
d *= scaler
@property
def dwidth(self):
if self.hue_names is None:
return self.width / 2
elif self.split:
return self.width / 2
else:
return self.width / (2 * len(self.hue_names))
def draw_violins(self, ax):
"""Draw the violins onto `ax`."""
fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
for i, group_data in enumerate(self.plot_data):
kws = dict(edgecolor=self.gray, linewidth=self.linewidth)
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
support, density = self.support[i], self.density[i]
# Handle special case of no observations in this bin
if support.size == 0:
continue
# Handle special case of a single observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
self.draw_single_observation(ax, i, val, d)
continue
# Draw the violin for this group
grid = np.ones(self.gridsize) * i
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
color=self.colors[i],
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data, support, density, i)
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data, support, density, i)
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data, support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
support, density = self.support[i][j], self.density[i][j]
kws["color"] = self.colors[j]
# Add legend data, but just for one set of violins
if not i:
self.add_legend_data(ax, support[0], 0,
self.colors[j],
hue_level)
# Handle the special case where we have no observations
if support.size == 0:
continue
# Handle the special case where we have one observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
if self.split:
d = d / 2
at_group = i + offsets[j]
self.draw_single_observation(ax, at_group, val, d)
continue
# Option 2a: we are drawing a single split violin
# -----------------------------------------------
if self.split:
grid = np.ones(self.gridsize) * i
if j:
fill_func(support,
grid,
grid + density * self.dwidth,
**kws)
else:
fill_func(support,
grid - density * self.dwidth,
grid,
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw quartile lines
if self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density, i,
["left", "right"][j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density, i,
["left", "right"][j])
# The box and point interior plots are drawn for
# all data at the group level, so we just do that once
if not j:
continue
# Get the whole vector for this group level
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2b: we are drawing full nested violins
# -----------------------------------------------
else:
grid = np.ones(self.gridsize) * (i + offsets[j])
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
**kws)
# Draw the interior representation
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density,
i + offsets[j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i + offsets[j])
def draw_single_observation(self, ax, at_group, at_quant, density):
"""Draw a line to mark a single observation."""
d_width = density * self.dwidth
if self.orient == "v":
ax.plot([at_group - d_width, at_group + d_width],
[at_quant, at_quant],
color=self.gray,
linewidth=self.linewidth)
else:
ax.plot([at_quant, at_quant],
[at_group - d_width, at_group + d_width],
color=self.gray,
linewidth=self.linewidth)
def draw_box_lines(self, ax, data, support, density, center):
"""Draw boxplot information at center of the density."""
# Compute the boxplot statistics
q25, q50, q75 = np.percentile(data, [25, 50, 75])
whisker_lim = 1.5 * iqr(data)
h1 = np.min(data[data >= (q25 - whisker_lim)])
h2 = np.max(data[data <= (q75 + whisker_lim)])
# Draw a boxplot using lines and a point
if self.orient == "v":
ax.plot([center, center], [h1, h2],
linewidth=self.linewidth,
color=self.gray)
ax.plot([center, center], [q25, q75],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(center, q50,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
else:
ax.plot([h1, h2], [center, center],
linewidth=self.linewidth,
color=self.gray)
ax.plot([q25, q75], [center, center],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(q50, center,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
def draw_quartiles(self, ax, data, support, density, center, split=False):
"""Draw the quartiles as lines at width of density."""
q25, q50, q75 = np.percentile(data, [25, 50, 75])
self.draw_to_density(ax, center, q25, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
self.draw_to_density(ax, center, q50, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 3] * 2)
self.draw_to_density(ax, center, q75, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
def draw_points(self, ax, data, center):
"""Draw individual observations as points at middle of the violin."""
kws = dict(s=np.square(self.linewidth * 2),
c=self.gray,
edgecolor=self.gray)
grid = np.ones(len(data)) * center
if self.orient == "v":
ax.scatter(grid, data, **kws)
else:
ax.scatter(data, grid, **kws)
def draw_stick_lines(self, ax, data, support, density,
center, split=False):
"""Draw individual observations as sticks at width of density."""
for val in data:
self.draw_to_density(ax, center, val, support, density, split,
linewidth=self.linewidth * .5)
def draw_to_density(self, ax, center, val, support, density, split, **kws):
"""Draw a line orthogonal to the value axis at width of density."""
idx = np.argmin(np.abs(support - val))
width = self.dwidth * density[idx] * .99
kws["color"] = self.gray
if self.orient == "v":
if split == "left":
ax.plot([center - width, center], [val, val], **kws)
elif split == "right":
ax.plot([center, center + width], [val, val], **kws)
else:
ax.plot([center - width, center + width], [val, val], **kws)
else:
if split == "left":
ax.plot([val, val], [center - width, center], **kws)
elif split == "right":
ax.plot([val, val], [center, center + width], **kws)
else:
ax.plot([val, val], [center - width, center + width], **kws)
def plot(self, ax):
"""Make the violin plot."""
self.draw_violins(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _StripPlotter(_BoxPlotter):
"""1-d scatterplot with categorical organization."""
def __init__(self, x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, 1)
# Set object attributes
self.split = split
self.width = .8
if jitter == 1: # Use a good default for `jitter = True`
jlim = 0.1
else:
jlim = float(jitter)
if self.hue_names is not None and split:
jlim /= len(self.hue_names)
self.jitterer = stats.uniform(-jlim, jlim * 2).rvs
def draw_stripplot(self, ax, kws):
"""Draw the points onto `ax`."""
# Set the default zorder to 2.1, so that the points
# will be drawn on top of line elements (like in a boxplot)
kws.setdefault("zorder", 2.1)
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
# Determine the positions of the points
strip_data = remove_na(group_data)
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[i]
# Draw the plot
if self.orient == "v":
ax.scatter(i + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, i + jitter, **kws)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
if not hue_mask.any():
continue
# Determine the positions of the points
strip_data = remove_na(group_data[hue_mask])
pos = i + offsets[j] if self.split else i
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[j]
# Only label one set of plots
if i:
kws.pop("label", None)
else:
kws["label"] = hue_level
# Draw the plot
if self.orient == "v":
ax.scatter(pos + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, pos + jitter, **kws)
def plot(self, ax, kws):
"""Make the plot."""
self.draw_stripplot(ax, kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _SwarmPlotter(_BoxPlotter):
def __init__(self):
pass
def plot(self, ax):
pass
_boxplot_docs = dict(
# Shared narrative docs
main_api_narrative=dedent("""\
Input data can be passed in a variety of formats, including:
- A "long-form" DataFrame, in which case the ``x``, ``y``, and ``hue``
variables will determine how the data are plotted.
- A "wide-form" DatFrame, such that each numeric column will be plotted.
- Anything accepted by ``plt.boxplot`` (e.g. a 2d array or list of vectors)
It is also possible to pass vector data directly to ``x``, ``y``, or
``hue``, and thus avoid passing a dataframe to ``data``.
In all cases, it is possible to use numpy or Python objects, but pandas
objects are preferable because the associated names will be used to
annotate the axes. Additionally, you can use Categorical types for the
grouping variables to control the order of plot elements.\
"""),
# Shared function parameters
main_api_params=dedent("""\
    x, y, hue : names of variables in ``data`` or vector data, optional
Variables for plotting long-form data. See examples for interpretation.
data : DataFrame, array, or list of arrays, optional
Dataset for plotting. If ``x`` and ``y`` are absent, this is
interpreted as wide-form. Otherwise it is expected to be long-form.
order, hue_order : lists of strings, optional
Order to plot the categorical levels in, otherwise the levels are
inferred from the data objects.\
"""),
orient=dedent("""\
orient : "v" | "h", optional
Orientation of the plot (vertical or horizontal). This can also be
inferred when using long-form data and Categorical data types.\
"""),
color=dedent("""\
color : matplotlib color, optional
Color for all of the elements, or seed for :func:`light_palette` when
using hue nesting.\
"""),
palette=dedent("""\
palette : palette name, list, or dict, optional
Color palette that maps either the grouping variable or the hue
variable.\
"""),
saturation=dedent("""\
saturation : float, optional
Proportion of the original saturation to draw colors at. Large patches
often look better with slightly desaturated colors, but set this to
``1`` if you want the plot colors to perfectly match the input color
spec.\
"""),
width=dedent("""\
width : float, optional
Width of a full element when not using hue nesting, or width of all the
elements for one level of the major grouping variable.\
"""),
linewidth=dedent("""\
linewidth : float, optional
Width of the gray lines that frame the plot elements.\
"""),
ax_in=dedent("""\
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.\
"""),
ax_out=dedent("""\
ax : matplotlib Axes
Returns the Axes object with the boxplot drawn onto it.\
"""),
# Shared see also
boxplot=dedent("""\
boxplot : A traditional box-and-whisker plot with a similar API.\
"""),
violinplot=dedent("""\
violinplot : A combination of boxplot and kernel density estimation.\
"""),
stripplot=dedent("""\
stripplot : A scatterplot where one variable is categorical. Can be used
in conjunction with a boxplot to show each observation.\
"""),
)
def boxplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
orient=None, color=None, palette=None, saturation=.75,
width=.8, fliersize=5, linewidth=None, whis=1.5, notch=False,
ax=None, **kwargs):
plotter = _BoxPlotter(x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth)
if ax is None:
ax = plt.gca()
kwargs.update(dict(whis=whis, notch=notch))
plotter.plot(ax, kwargs)
return ax
boxplot.__doc__ = dedent("""\
Draw a box-and-whisker plot.
{main_api_narrative}
Parameters
----------
{main_api_params}
{orient}
{color}
{palette}
{saturation}
{width}
fliersize : float, optional
Size of the markers used to indicate outlier observations.
{linewidth}
whis : float, optional
Proportion of the IQR past the low and high quartiles to extend the
plot whiskers. Points outside this range will be identified as
outliers.
notch : boolean, optional
Whether to "notch" the box to indicate a confidence interval for the
median. There are several other parameters that can control how the
notches are drawn; see the ``plt.boxplot`` help for more information
on them.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.boxplot`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{violinplot}
{stripplot}
Examples
--------
Draw a single horizontal boxplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.boxplot(x=tips["total_bill"])
Draw a vertical boxplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
Draw a boxplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="Set3")
Draw a boxplot with nested grouping when some bins are empty:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="time",
... data=tips, linewidth=2.5)
Draw a boxplot for each numeric variable in a DataFrame:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> ax = sns.boxplot(data=iris, orient="h", palette="Set2")
Use :func:`stripplot` to show the datapoints on top of the boxes:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... size=4, jitter=True, edgecolor="gray")
Draw a box plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.boxplot, "sex", "total_bill", "smoker")
... .despine(left=True)
... .add_legend(title="smoker")) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_boxplot_docs)
def violinplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
bw="scott", cut=2, scale="area", scale_hue=True, gridsize=100,
width=.8, inner="box", split=False, orient=None, linewidth=None,
color=None, palette=None, saturation=.75, ax=None):
plotter = _ViolinPlotter(x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
violinplot.__doc__ = dedent("""\
Draw a combination of boxplot and kernel density estimate.
A violin plot plays a similar role as a box and whisker plot. It shows the
distribution of quantitative data across several levels of one (or more)
categorical variables such that those distributions can be compared. Unlike
a boxplot, in which all of the plot components correspond to actual
datapoints, the violin plot features a kernel density estimation of the
underlying distribution.
This can be an effective and attractive way to show multiple distributions
of data at once, but keep in mind that the estimation procedure is
influenced by the sample size, and violins for relatively small samples
might look misleadingly smooth.
{main_api_narrative}
Parameters
----------
{main_api_params}
bw : {{'scott', 'silverman', float}}, optional
Either the name of a reference rule or the scale factor to use when
computing the kernel bandwidth. The actual kernel size will be
determined by multiplying the scale factor by the standard deviation of
the data within each bin.
cut : float, optional
Distance, in units of bandwidth size, to extend the density past the
extreme datapoints. Set to 0 to limit the violin range within the range
of the observed data (i.e., to have the same effect as ``trim=True`` in
        ``ggplot``).
scale : {{"area", "count", "width"}}, optional
The method used to scale the width of each violin. If ``area``, each
violin will have the same area. If ``count``, the width of the violins
will be scaled by the number of observations in that bin. If ``width``,
each violin will have the same width.
scale_hue : bool, optional
When nesting violins using a ``hue`` variable, this parameter
determines whether the scaling is computed within each level of the
major grouping variable (``scale_hue=True``) or across all the violins
on the plot (``scale_hue=False``).
gridsize : int, optional
Number of points in the discrete grid used to compute the kernel
density estimate.
{width}
inner : {{"box", "quartile", "point", "stick", None}}, optional
Representation of the datapoints in the violin interior. If ``box``,
        draw a miniature boxplot. If ``quartile``, draw the quartiles of the
distribution. If ``point`` or ``stick``, show each underlying
datapoint. Using ``None`` will draw unadorned violins.
split : bool, optional
When using hue nesting with a variable that takes two levels, setting
``split`` to True will draw half of a violin for each level. This can
make it easier to directly compare the distributions.
{orient}
{linewidth}
{color}
{palette}
{saturation}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{stripplot}
Examples
--------
Draw a single horizontal violinplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.violinplot(x=tips["total_bill"])
Draw a vertical violinplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips)
Draw a violinplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted")
    Draw split violins to compare across the hue variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted", split=True)
Scale the violin width by the number of observations in each bin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count")
Draw the quartiles as horizontal lines instead of a mini-box:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="quartile")
Show each observation with a stick inside the violin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick")
Scale the density relative to the counts across all bins:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick", scale_hue=False)
Use a narrow bandwidth to reduce the amount of smoothing:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick",
... scale_hue=False, bw=.2)
Draw horizontal violins (if the grouping variable has a ``Categorical``
dtype, the ``orient`` argument can be omitted):
.. plot::
:context: close-figs
>>> planets = sns.load_dataset("planets")
>>> ax = sns.violinplot(x="orbital_period", y="method",
... data=planets[planets.orbital_period < 1000],
... scale="width", orient="h", palette="Set3")
Draw a violin plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.violinplot, "sex", "total_bill", "smoker", split=True)
... .despine(left=True)
... .add_legend(title="smoker")) # doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_boxplot_docs)
def stripplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
jitter=False, split=True, orient=None, color=None, palette=None,
size=7, edgecolor="w", linewidth=1, ax=None, **kwargs):
plotter = _StripPlotter(x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette)
if ax is None:
ax = plt.gca()
kwargs.update(dict(s=size ** 2, edgecolor=edgecolor, linewidth=linewidth))
if edgecolor == "gray":
kwargs["edgecolor"] = plotter.gray
plotter.plot(ax, kwargs)
return ax
stripplot.__doc__ = dedent("""\
Draw a scatterplot where one variable is categorical.
A strip plot can be drawn on its own, but it is also a good complement
to a box or violinplot in cases where you want to show all observations
along with some representation of the underlying distribution.
{main_api_narrative}
Parameters
----------
{main_api_params}
jitter : float, ``True``/``1`` is special-cased, optional
Amount of jitter (only along the categorical axis) to apply. This
can be useful when you have many points and they overlap, so that
it is easier to see the distribution. You can specify the amount
of jitter (half the width of the uniform random variable support),
or just use ``True`` for a good default.
split : bool, optional
When using ``hue`` nesting, setting this to ``True`` will separate
the strips for different hue levels along the categorical axis.
Otherwise, the points for each level will be plotted on top of
each other.
{orient}
{color}
{palette}
size : float, optional
Diameter of the markers, in points. (Although ``plt.scatter`` is used
to draw the points, the ``size`` argument here takes a "normal"
        markersize and not size^2 like ``plt.scatter``.)
edgecolor : matplotlib color, "gray" is special-cased, optional
Color of the lines around each point. If you pass ``"gray"``, the
brightness is determined by the color palette used for the body
of the points.
{linewidth}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{violinplot}
Examples
--------
Draw a single horizontal strip plot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.stripplot(x=tips["total_bill"])
Group the strips by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips)
Add jitter to bring out the distribution of values:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=True)
Use a smaller amount of jitter:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=0.05)
Draw horizontal strips (if the grouping variable has a ``Categorical``
dtype, the ``orient`` argument can be omitted):
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="total_bill", y="day", data=tips,
... jitter=True, orient="h")
Nest the strips within a second categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="sex", y="total_bill", hue="day",
... data=tips, jitter=True)
Draw each level of the ``hue`` variable at the same location on the
major categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", hue="smoker",
... data=tips, jitter=True,
... palette="Set2", split=False)
Draw strips with large points and different aesthetics:
.. plot::
:context: close-figs
>>> ax = sns.stripplot("day", "total_bill", "smoker", data=tips,
... palette="Set2", size=20, marker="D",
... edgecolor="gray", alpha=.25)
Draw strips of observations on top of a box plot:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="total_bill", y="day", data=tips,
... orient="h", whis=np.inf)
>>> ax = sns.stripplot(x="total_bill", y="day", data=tips,
... jitter=True, orient="h")
Draw strips of observations on top of a violin plot
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips, inner=None)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... jitter=True, color="white", edgecolor="gray")
""").format(**_boxplot_docs)
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From http://stats.stackexchange.com/questions/798/
a = np.asarray(a)
h = 2 * iqr(a) / (len(a) ** (1 / 3))
# fall back to 10 bins if iqr is 0
if h == 0:
return 10.
else:
return np.ceil((a.max() - a.min()) / h)
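# Illustrative sketch (not part of the original module): the helper above
# applies the Freedman-Diaconis rule, i.e. bin width = 2 * IQR / n**(1/3),
# and derives the bin count from the data range. A standalone cross-check,
# assuming a 1-d numeric sample and using only the module-level numpy import
# (the helper name is hypothetical):
def _demo_freedman_diaconis(sample):
    """Compute the Freedman-Diaconis bin count for `sample` (demo only)."""
    sample = np.asarray(sample, dtype=float)
    q75, q25 = np.percentile(sample, [75, 25])
    width = 2 * (q75 - q25) / (len(sample) ** (1. / 3.))
    if width == 0:
        return 10  # fall back to 10 bins, mirroring the helper above
    return int(np.ceil((sample.max() - sample.min()) / width))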
def distplot(a, bins=None, hist=True, kde=True, rug=False, fit=None,
hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
color=None, vertical=False, norm_hist=False, axlabel=None,
label=None, ax=None):
"""Flexibly plot a distribution of observations.
Parameters
----------
a : (squeezable to) 1d array
Observed data.
bins : argument for matplotlib hist(), or None, optional
Specification of hist bins, or None to use Freedman-Diaconis rule.
hist : bool, optional
Whether to plot a (normed) histogram.
kde : bool, optional
Whether to plot a gaussian kernel density estimate.
rug : bool, optional
Whether to draw a rugplot on the support axis.
fit : random variable object, optional
        An object with a `fit` method, returning a tuple that can be passed
        to a `pdf` method as positional arguments following a grid of values
        to evaluate the pdf on.
{hist, kde, rug, fit}_kws : dictionaries, optional
Keyword arguments for underlying plotting functions.
color : matplotlib color, optional
Color to plot everything but the fitted curve in.
vertical : bool, optional
        If True, observed values are on the y-axis.
    norm_hist : bool, optional
If True, the histogram height shows a density rather than a count.
This is implied if a KDE or fitted density is plotted.
axlabel : string, False, or None, optional
Name for the support axis label. If None, will try to get it
        from a.name; if False, do not set a label.
label : string, optional
        Legend label for the relevant component of the plot.
ax : matplotlib axis, optional
        If provided, plot on this axis.
Returns
-------
ax : matplotlib axis
"""
if ax is None:
ax = plt.gca()
# Intelligently label the support axis
label_ax = bool(axlabel)
if axlabel is None and hasattr(a, "name"):
axlabel = a.name
if axlabel is not None:
label_ax = True
# Make a a 1-d array
a = np.asarray(a).squeeze()
# Decide if the hist is normed
norm_hist = norm_hist or kde or (fit is not None)
# Handle dictionary defaults
if hist_kws is None:
hist_kws = dict()
if kde_kws is None:
kde_kws = dict()
if rug_kws is None:
rug_kws = dict()
if fit_kws is None:
fit_kws = dict()
# Get the color from the current color cycle
if color is None:
if vertical:
line, = ax.plot(0, a.mean())
else:
line, = ax.plot(a.mean(), 0)
color = line.get_color()
line.remove()
# Plug the label into the right kwarg dictionary
if label is not None:
if hist:
hist_kws["label"] = label
elif kde:
kde_kws["label"] = label
elif rug:
rug_kws["label"] = label
elif fit:
fit_kws["label"] = label
if hist:
if bins is None:
bins = _freedman_diaconis_bins(a)
hist_kws.setdefault("alpha", 0.4)
hist_kws.setdefault("normed", norm_hist)
orientation = "horizontal" if vertical else "vertical"
hist_color = hist_kws.pop("color", color)
ax.hist(a, bins, orientation=orientation,
color=hist_color, **hist_kws)
if hist_color != color:
hist_kws["color"] = hist_color
if kde:
kde_color = kde_kws.pop("color", color)
kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
if kde_color != color:
kde_kws["color"] = kde_color
if rug:
rug_color = rug_kws.pop("color", color)
axis = "y" if vertical else "x"
rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
if rug_color != color:
rug_kws["color"] = rug_color
if fit is not None:
fit_color = fit_kws.pop("color", "#282828")
gridsize = fit_kws.pop("gridsize", 200)
cut = fit_kws.pop("cut", 3)
clip = fit_kws.pop("clip", (-np.inf, np.inf))
bw = stats.gaussian_kde(a).scotts_factor() * a.std(ddof=1)
x = _kde_support(a, bw, gridsize, cut, clip)
params = fit.fit(a)
pdf = lambda x: fit.pdf(x, *params)
y = pdf(x)
if vertical:
x, y = y, x
ax.plot(x, y, color=fit_color, **fit_kws)
if fit_color != "#282828":
fit_kws["color"] = fit_color
if label_ax:
if vertical:
ax.set_ylabel(axlabel)
else:
ax.set_xlabel(axlabel)
return ax
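# Usage sketch (illustrative, not part of the original module): distplot()
# layers a histogram, a KDE curve, a rug, and/or a fitted parametric density
# on one Axes. The demo below relies on the module-level np, pd, and stats
# imports used elsewhere in this file; the function name is hypothetical.
def _demo_distplot():
    """Minimal distplot() sketch, assuming a numeric pandas Series."""
    values = pd.Series(np.random.RandomState(0).normal(size=200),
                       name="measurement")
    # Histogram + KDE + rug + fitted normal curve; the Series name is picked
    # up automatically for the support-axis label.
    ax = distplot(values, rug=True, fit=stats.norm,
                  kde_kws={"label": "KDE"}, hist_kws={"alpha": .3})
    return ax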
def _univariate_kdeplot(data, shade, vertical, kernel, bw, gridsize, cut,
clip, legend, ax, cumulative=False, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes."""
# Sort out the clipping
if clip is None:
clip = (-np.inf, np.inf)
# Calculate the KDE
if _has_statsmodels:
# Prefer using statsmodels for kernel flexibility
x, y = _statsmodels_univariate_kde(data, kernel, bw,
gridsize, cut, clip,
cumulative=cumulative)
else:
# Fall back to scipy if missing statsmodels
if kernel != "gau":
kernel = "gau"
msg = "Kernel other than `gau` requires statsmodels."
warnings.warn(msg, UserWarning)
if cumulative:
raise ImportError("Cumulative distributions are currently"
"only implemented in statsmodels."
"Please install statsmodels.")
x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(data, "name"):
label = data.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
alpha = kwargs.get("alpha", 0.25)
if shade:
if vertical:
ax.fill_betweenx(y, 1e-12, x, color=color, alpha=alpha)
else:
ax.fill_between(x, 1e-12, y, color=color, alpha=alpha)
# Draw the legend here
if legend:
ax.legend(loc="best")
return ax
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
cumulative=False):
"""Compute a univariate kernel density estimate using statsmodels."""
fft = kernel == "gau"
kde = smnp.KDEUnivariate(data)
kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
if cumulative:
grid, y = kde.support, kde.cdf
else:
grid, y = kde.support, kde.density
return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
"""Compute a univariate kernel density estimate using scipy."""
try:
kde = stats.gaussian_kde(data, bw_method=bw)
except TypeError:
kde = stats.gaussian_kde(data)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
if isinstance(bw, str):
bw = "scotts" if bw == "scott" else bw
bw = getattr(kde, "%s_factor" % bw)()
grid = _kde_support(data, bw, gridsize, cut, clip)
y = kde(grid)
return grid, y
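# Illustrative sketch (not part of the original module): the scipy fallback
# above fits scipy.stats.gaussian_kde and evaluates it on a support grid built
# by _kde_support. The same idea written out with an explicit numpy grid, for
# the gaussian kernel and default bandwidth only (the helper name is
# hypothetical):
def _demo_scipy_kde(sample, gridsize=100, cut=3):
    """Return (grid, density) for a 1-d sample using scipy's gaussian KDE."""
    sample = np.asarray(sample, dtype=float)
    kde = stats.gaussian_kde(sample)
    # Extend the grid `cut` bandwidths past the extreme observations.
    bw = kde.scotts_factor() * sample.std(ddof=1)
    grid = np.linspace(sample.min() - cut * bw,
                       sample.max() + cut * bw, gridsize)
    return grid, kde(grid)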
def _bivariate_kdeplot(x, y, filled, kernel, bw, gridsize, cut, clip, axlabel,
ax, **kwargs):
"""Plot a joint KDE estimate as a bivariate contour plot."""
# Determine the clipping
if clip is None:
clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
elif np.ndim(clip) == 1:
clip = [clip, clip]
# Calculate the KDE
if _has_statsmodels:
xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
else:
xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
# Plot the contours
n_levels = kwargs.pop("n_levels", 10)
cmap = kwargs.get("cmap", "BuGn" if filled else "BuGn_d")
if isinstance(cmap, str):
if cmap.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
cmap = blend_palette(pal, as_cmap=True)
kwargs["cmap"] = cmap
contour_func = ax.contourf if filled else ax.contour
contour_func(xx, yy, z, n_levels, **kwargs)
kwargs["n_levels"] = n_levels
# Label the axes
if hasattr(x, "name") and axlabel:
ax.set_xlabel(x.name)
if hasattr(y, "name") and axlabel:
ax.set_ylabel(y.name)
return ax
def _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using statsmodels."""
if isinstance(bw, str):
bw_func = getattr(smnp.bandwidths, "bw_" + bw)
x_bw = bw_func(x)
y_bw = bw_func(y)
bw = [x_bw, y_bw]
elif np.isscalar(bw):
bw = [bw, bw]
if isinstance(x, pd.Series):
x = x.values
if isinstance(y, pd.Series):
y = y.values
kde = smnp.KDEMultivariate([x, y], "cc", bw)
x_support = _kde_support(x, kde.bw[0], gridsize, cut, clip[0])
y_support = _kde_support(y, kde.bw[1], gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using scipy."""
data = np.c_[x, y]
kde = stats.gaussian_kde(data.T)
data_std = data.std(axis=0, ddof=1)
if isinstance(bw, str):
bw = "scotts" if bw == "scott" else bw
bw_x = getattr(kde, "%s_factor" % bw)() * data_std[0]
bw_y = getattr(kde, "%s_factor" % bw)() * data_std[1]
elif np.isscalar(bw):
bw_x, bw_y = bw, bw
else:
msg = ("Cannot specify a different bandwidth for each dimension "
"with the scipy backend. You should install statsmodels.")
raise ValueError(msg)
x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
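# Illustrative sketch (not part of the original module): the scipy bivariate
# path above fits one gaussian_kde on the stacked (x, y) sample and evaluates
# it over a meshgrid. A compact, hypothetical equivalent without the custom
# bandwidth and clipping handling:
def _demo_scipy_kde2d(x, y, gridsize=50):
    """Return (xx, yy, z) for a bivariate gaussian KDE on a square grid."""
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    kde = stats.gaussian_kde(np.vstack([x, y]))
    xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), gridsize),
                         np.linspace(y.min(), y.max(), gridsize))
    z = kde(np.vstack([xx.ravel(), yy.ravel()])).reshape(xx.shape)
    return xx, yy, z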
def kdeplot(data, data2=None, shade=False, vertical=False, kernel="gau",
bw="scott", gridsize=100, cut=3, clip=None, legend=True, ax=None,
cumulative=False, **kwargs):
"""Fit and plot a univariate or bivarate kernel density estimate.
Parameters
----------
data : 1d or 2d array-like
Input data. If two-dimensional, assumed to be shaped (n_unit x n_var),
and a bivariate contour plot will be drawn.
    data2 : 1d array-like, optional
        Second input data. If provided, `data` must be one-dimensional, and
        a bivariate plot is produced.
shade : bool, optional
If true, shade in the area under the KDE curve (or draw with filled
contours when data is bivariate).
vertical : bool
If True, density is on x-axis.
kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
Code for shape of kernel to fit with. Bivariate KDE can only use
gaussian kernel.
bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
Name of reference method to determine kernel size, scalar factor,
or scalar for each dimension of the bivariate plot.
gridsize : int, optional
Number of discrete points in the evaluation grid.
cut : scalar, optional
Draw the estimate to cut * bw from the extreme data points.
clip : pair of scalars, or pair of pair of scalars, optional
Lower and upper bounds for datapoints used to fit KDE. Can provide
a pair of (low, high) bounds for bivariate plots.
    legend : bool, optional
If True, add a legend or label the axes when possible.
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis.
    cumulative : bool, optional
        If True, draw the cumulative distribution estimated by the kde.
kwargs : other keyword arguments for plot()
Returns
-------
ax : matplotlib axis
Axis with plot.
"""
if ax is None:
ax = plt.gca()
data = data.astype(np.float64)
if data2 is not None:
data2 = data2.astype(np.float64)
bivariate = False
if isinstance(data, np.ndarray) and np.ndim(data) > 1:
bivariate = True
x, y = data.T
elif isinstance(data, pd.DataFrame) and np.ndim(data) > 1:
bivariate = True
x = data.iloc[:, 0].values
y = data.iloc[:, 1].values
elif data2 is not None:
bivariate = True
x = data
y = data2
if bivariate and cumulative:
raise TypeError("Cumulative distribution plots are not"
"supported for bivariate distributions.")
if bivariate:
ax = _bivariate_kdeplot(x, y, shade, kernel, bw, gridsize,
cut, clip, legend, ax, **kwargs)
else:
ax = _univariate_kdeplot(data, shade, vertical, kernel, bw,
gridsize, cut, clip, legend, ax,
cumulative=cumulative, **kwargs)
return ax
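# Usage sketch (illustrative, not part of the original module): kdeplot()
# dispatches to the univariate or bivariate helper depending on its inputs.
# The demo below uses the module-level np and plt imports; the function name
# is hypothetical.
def _demo_kdeplot():
    """Minimal kdeplot() sketch: one univariate and one bivariate call."""
    rs = np.random.RandomState(0)
    x = rs.normal(size=200)
    y = x + rs.normal(scale=.5, size=200)
    fig, (ax1, ax2) = plt.subplots(1, 2)
    # Shaded univariate density of x.
    kdeplot(x, shade=True, ax=ax1)
    # Filled bivariate contour plot of (x, y), triggered by passing data2.
    kdeplot(x, data2=y, shade=True, ax=ax2)
    return fig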
def rugplot(a, height=None, axis="x", ax=None, **kwargs):
"""Plot datapoints in an array as sticks on an axis.
Parameters
----------
a : vector
1D array of datapoints.
height : scalar, optional
Height of ticks, if None draw at 5% of axis range.
axis : {'x' | 'y'}, optional
Axis to draw rugplot on.
ax : matplotlib axis
Axis to draw plot into; otherwise grabs current axis.
kwargs : other keyword arguments for plt.plot()
Returns
-------
ax : matplotlib axis
Axis with rugplot.
"""
if ax is None:
ax = plt.gca()
a = np.asarray(a)
vertical = kwargs.pop("vertical", None)
if vertical is not None:
axis = "y" if vertical else "x"
other_axis = dict(x="y", y="x")[axis]
min, max = getattr(ax, "get_%slim" % other_axis)()
if height is None:
range = max - min
height = range * .05
if axis == "x":
ax.plot([a, a], [min, min + height], **kwargs)
else:
ax.plot([min, min + height], [a, a], **kwargs)
return ax
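# Usage sketch (illustrative, not part of the original module): rugplot() is
# typically layered under a density curve so each observation is visible as a
# tick on the support axis. The function name below is hypothetical.
def _demo_rugplot():
    """Minimal sketch: a KDE curve with the raw observations as a rug."""
    data = np.random.RandomState(0).gamma(2, size=100)
    ax = kdeplot(data, shade=True)
    rugplot(data, color="k", ax=ax)
    return ax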
def jointplot(x, y, data=None, kind="scatter", stat_func=stats.pearsonr,
color=None, size=6, ratio=5, space=.2,
dropna=True, xlim=None, ylim=None,
joint_kws=None, marginal_kws=None, annot_kws=None):
"""Draw a plot of two variables with bivariate and univariate graphs.
Parameters
----------
x, y : strings or vectors
Data or names of variables in `data`.
data : DataFrame, optional
DataFrame when `x` and `y` are variable names.
kind : { "scatter" | "reg" | "resid" | "kde" | "hex" }, optional
Kind of plot to draw.
stat_func : callable or None
Function used to calculate a statistic about the relationship and
annotate the plot. Should map `x` and `y` either to a single value
or to a (value, p) tuple. Set to ``None`` if you don't want to
annotate the plot.
color : matplotlib color, optional
Color used for the plot elements.
size : numeric, optional
Size of the figure (it will be square).
ratio : numeric, optional
Ratio of joint axes size to marginal axes height.
space : numeric, optional
Space between the joint and marginal axes
dropna : bool, optional
If True, remove observations that are missing from `x` and `y`.
{x, y}lim : two-tuples, optional
Axis limits to set before plotting.
{joint, marginal, annot}_kws : dicts
Additional keyword arguments for the plot components.
Returns
-------
grid : JointGrid
JointGrid object with the plot on it.
See Also
--------
JointGrid : The Grid class used for drawing this plot. Use it directly if
you need more flexibility.
"""
# Set up empty default kwarg dicts
if joint_kws is None:
joint_kws = {}
if marginal_kws is None:
marginal_kws = {}
if annot_kws is None:
annot_kws = {}
# Make a colormap based off the plot color
if color is None:
color = color_palette()[0]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [set_hls_values(color_rgb, l=l) for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Initialize the JointGrid object
grid = JointGrid(x, y, data, dropna=dropna,
size=size, ratio=ratio, space=space,
xlim=xlim, ylim=ylim)
# Plot the data using the grid
if kind == "scatter":
joint_kws.setdefault("color", color)
grid.plot_joint(plt.scatter, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = _freedman_diaconis_bins(grid.x)
y_bins = _freedman_diaconis_bins(grid.y)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("kde"):
joint_kws.setdefault("shade", True)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("shade", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("reg"):
from .linearmodels import regplot
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
from .linearmodels import residplot
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", False)
distplot(x, ax=grid.ax_marg_x, **marginal_kws)
distplot(y, vertical=True, fit=stats.norm, ax=grid.ax_marg_y,
**marginal_kws)
stat_func = None
else:
msg = "kind must be either 'scatter', 'reg', 'resid', 'kde', or 'hex'"
raise ValueError(msg)
if stat_func is not None:
grid.annotate(stat_func, **annot_kws)
return grid
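# Usage sketch (illustrative, not part of the original module): jointplot()
# builds a JointGrid and fills the joint and marginal axes according to
# `kind`. The demo assumes long-form data in a DataFrame and relies on the
# module-level np and pd imports; the function name is hypothetical.
def _demo_jointplot():
    """Minimal jointplot() sketch with long-form data."""
    rs = np.random.RandomState(0)
    df = pd.DataFrame({"x": rs.normal(size=300),
                       "y": rs.normal(size=300)})
    # Hexbin joint plot with histogram marginals; kind="kde" or kind="reg"
    # would swap in the other joint representations handled above.
    grid = jointplot("x", "y", data=df, kind="hex")
    return grid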
|
bsd-3-clause
|
hotpxl/nebuchadnezzar
|
thesis_plots.py
|
1
|
29078
|
#!/usr/bin/env python3.4
import sys
import io
import math
import stats.data
import stats.plot
import stats.preprocess
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates
import pandas
import statsmodels.tsa.api
import statsmodels.tsa.stattools
import numpy as np
all_plots = []
def register_plot(func):
def ret(*args, **kwargs):
kwargs['func_name'] = func.__name__
return func(*args, **kwargs)
all_plots.append(ret)
return ret
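# Illustrative sketch (not part of the original script): register_plot wraps a
# plotting function so its own __name__ is forwarded as the `func_name`
# keyword (used for the output file name) and records the wrapper in
# `all_plots`, which the __main__ block at the bottom iterates over. A
# stripped-down, hypothetical rendering of the same pattern, kept inside a
# function so it has no import-time side effects:
def _demo_registry_pattern():
    registry = []
    def register(func):
        def wrapper(*args, **kwargs):
            kwargs['func_name'] = func.__name__
            return func(*args, **kwargs)
        registry.append(wrapper)
        return wrapper
    @register
    def example_plot(func_name):
        return 'thesis/plots/{}.pdf'.format(func_name)
    # Calling everything in the registry mirrors the loop in __main__ below.
    return [plot() for plot in registry]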
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = io.StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
sys.stdout = self._stdout
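# Illustrative sketch (not part of the original script): Capturing temporarily
# redirects sys.stdout to an in-memory buffer so that the verbose output of
# statsmodels helpers (e.g. VAR.select_order) can be collected as a list of
# lines instead of being printed directly. A minimal, hypothetical use:
def _demo_capturing():
    with Capturing() as output:
        print('line one')
        print('line two')
    # `output` is now ['line one', 'line two'] and stdout has been restored.
    return output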
@register_plot
def click_count_volume_line(func_name):
fig, ax0 = plt.subplots()
ax1 = ax0.twinx()
lines = []
d = stats.data.get_merged('600000', 'date', 'volume', 'clickCount')
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
volume = d[:, 1]
click_count = d[:, 2]
ax0.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
lines += ax0.plot(dates, volume, 'k-', label='Volume')
ax0.set_xlabel('Date')
ax0.set_ylabel('Volume')
lines += ax1.plot(dates, click_count, 'k:', label='Click count')
ax1.set_ylabel('Click count')
labels = [i.get_label() for i in lines]
ax0.grid()
ax0.legend(lines, labels, loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_volume_lag_selection(func_name):
d = stats.data.get_merged('600000', 'date', 'volume', 'clickCount')
volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
data = pandas.DataFrame({'volume': volume,
'clickCount': click_count})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
with Capturing() as output:
model.select_order()
print('\n'.join(output))
@register_plot
def click_count_volume_granger_causality_test_result(func_name):
d = stats.data.get_merged_old('600000', 'volume', 'readCount')
with Capturing() as output:
statsmodels.tsa.api.stattools.\
grangercausalitytests(d, 5, verbose=True)
print('\n'.join(output))
@register_plot
def click_count_volume_granger_causality_test(func_name):
d = stats.data.get_merged_old('600000', 'volume', 'readCount')
max_lag = 5
res = statsmodels.tsa.api.stattools.\
grangercausalitytests(d, max_lag, verbose=False)
ssr_chi2test = []
params_ftest = []
lrtest = []
ssr_ftest = []
for i in range(1, max_lag + 1):
ssr_chi2test.append(res[i][0]['ssr_chi2test'][1])
params_ftest.append(res[i][0]['params_ftest'][1])
lrtest.append(res[i][0]['lrtest'][1])
ssr_ftest.append(res[i][0]['ssr_ftest'][1])
x_axis = range(1, max_lag + 1)
fig, ax = plt.subplots()
ax.plot(x_axis, ssr_chi2test, 'k-', label='SSR $\chi^{2}$ test')
ax.plot(x_axis, params_ftest, 'k--', label='Params $F$ test')
ax.plot(x_axis, lrtest, 'k:', label='LR $\chi^{2}$ test')
ax.plot(x_axis, ssr_ftest, 'k-.', label='SSR $F$ test')
ax.set_ylabel('$p$ value')
ax.set_xlabel('Lag value')
ax.set_xticks(x_axis)
ax.grid()
ax.legend(loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_volume_granger_causality_test_on_sse_50(func_name):
results = []
tests = [
('ssr_ftest', 'SSR $F$ test'),
('params_ftest', 'Params $F$ test'),
('lrtest', 'LR test'),
('ssr_chi2test', 'SSR $\chi^{2}$ test'),
]
for index in stats.data.sse_indices():
d = stats.data.get_merged_old(index, 'date', 'volume', 'readCount')
volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
data = pandas.DataFrame({
'volume': volume,
'clickCount': click_count})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag_order = model.select_order(verbose=False)
lag = lag_order['hqic']
res = statsmodels.tsa.api.stattools.\
grangercausalitytests(d[:, 1:], lag, verbose=False)
cur = []
for i in tests:
cur.append(res[lag][0][i[0]][1])
results.append(cur)
fig, ax = plt.subplots()
index = np.arange(len(results))
bar_width = 0.8
for i in range(len(tests)):
plt.bar(index, np.asarray(results)[:, i].flatten(), bar_width, color='w', label=tests[i][1])
plt.xlabel('Stock')
plt.ylabel('$p$ value')
plt.legend(loc=0)
plt.savefig('thesis/plots/{}_{}.pdf'.format(func_name, tests[i][0]))
plt.clf()
@register_plot
def click_count_volume_granger_causality_test_on_sse_50_abnormal_lag_selection(func_name):
d = stats.data.get_merged_old('600028', 'date', 'volume', 'readCount')
volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
data = pandas.DataFrame({'volume': volume,
'clickCount': click_count})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
with Capturing() as output:
model.select_order()
print('\n'.join(output))
@register_plot
def click_count_volume_granger_causality_test_on_sse_50_abnormal(func_name):
d = stats.data.get_merged_old('600028', 'volume', 'readCount')
max_lag = 10
res = statsmodels.tsa.api.stattools.\
grangercausalitytests(d, max_lag, verbose=False)
ssr_chi2test = []
params_ftest = []
lrtest = []
ssr_ftest = []
for i in range(1, max_lag + 1):
ssr_chi2test.append(res[i][0]['ssr_chi2test'][1])
params_ftest.append(res[i][0]['params_ftest'][1])
lrtest.append(res[i][0]['lrtest'][1])
ssr_ftest.append(res[i][0]['ssr_ftest'][1])
x_axis = range(1, max_lag + 1)
fig, ax = plt.subplots()
ax.plot(x_axis, ssr_chi2test, 'k-', label='SSR $\chi^{2}$ test')
ax.plot(x_axis, params_ftest, 'k--', label='Params $F$ test')
ax.plot(x_axis, lrtest, 'k:', label='LR $\chi^{2}$ test')
ax.plot(x_axis, ssr_ftest, 'k-.', label='SSR $F$ test')
ax.set_ylabel('$p$ value')
ax.set_xlabel('Lag value')
ax.set_xticks(x_axis)
ax.grid()
ax.legend(loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_volume_granger_causality_test_on_sse_50_abnormal_line(func_name):
fig, ax0 = plt.subplots()
ax1 = ax0.twinx()
lines = []
d = stats.data.get_merged_old('600028', 'date', 'volume', 'readCount')
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
volume = d[:, 1]
click_count = d[:, 2]
ax0.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
lines += ax0.plot(dates, volume, 'k-', label='Volume')
ax0.set_xlabel('Date')
ax0.set_ylabel('Volume')
lines += ax1.plot(dates, click_count, 'k:', label='Click count')
ax1.set_ylabel('Click count')
labels = [i.get_label() for i in lines]
ax0.grid()
ax0.legend(lines, labels, loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_volume_line_to_forecast(func_name):
fig, ax0 = plt.subplots()
ax1 = ax0.twinx()
lines = []
d = stats.data.get_merged_old('600036', 'date', 'volume', 'readCount')
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
volume = d[:, 1]
click_count = d[:, 2]
ax0.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
lines += ax0.plot(dates, volume, 'k-', label='Volume')
ax0.set_xlabel('Date')
ax0.set_ylabel('Volume')
lines += ax1.plot(dates, click_count, 'k:', label='Click count')
ax1.set_ylabel('Click count')
labels = [i.get_label() for i in lines]
ax0.grid()
ax0.legend(lines, labels, loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_volume_var(func_name):
d = stats.data.get_merged_old(600036, 'date', 'volume', 'readCount')
volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
data = pandas.DataFrame({
'volume': volume,
'clickCount': click_count
})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
results = model.fit(ic='hqic')
print(results.summary())
@register_plot
def click_count_forecast_volume(func_name):
d = stats.data.get_merged_old(600036, 'date', 'volume', 'readCount')
volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
data = pandas.DataFrame({
'volume': volume,
'clickCount': click_count
})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag = model.select_order(verbose=False)['hqic']
length = data.values.shape[0]
results = model.fit(ic='hqic')
prediction = [0] * (lag)
for j in range(lag, length):
prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
cnt = 0
for j in range(lag, length):
diff = prediction[j] - volume[j]
cnt += diff ** 2
print(math.sqrt(cnt / (length - lag)) / (max(volume) - min(volume)))
fig, ax = plt.subplots()
ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
ax.plot(dates, volume, 'k-', label='Real')
ax.plot(dates, prediction, 'k--', label='Prediction')
ax.set_ylabel('Volume')
ax.set_xlabel('Date')
ax.grid()
ax.legend(loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
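# Illustrative sketch (not part of the original script): the function above
# builds rolling one-step-ahead VAR forecasts and reports the range-normalised
# RMSE (NRMSE) of the predicted volume; several later functions repeat the
# same bookkeeping. A hypothetical helper factoring it out, assuming `data` is
# a two-column DataFrame like the ones built above (pandas sorts the dict keys,
# so column index 1 holds 'volume') and that the HQIC lag is non-zero:
def _demo_one_step_nrmse(data, target_col=1):
    """Return the NRMSE of rolling one-step VAR forecasts for one column."""
    model = statsmodels.tsa.api.VAR(data)
    lag = model.select_order(verbose=False)['hqic']
    results = model.fit(ic='hqic')
    values = data.values
    target = values[:, target_col]
    length = values.shape[0]
    prediction = [0] * lag
    for j in range(lag, length):
        # Forecast one step ahead from the previous `lag` observations.
        prediction.append(results.forecast(values[j - lag: j], 1)[0][target_col])
    sq_err = sum((prediction[j] - target[j]) ** 2 for j in range(lag, length))
    rmse = math.sqrt(sq_err / (length - lag))
    return rmse / (target.max() - target.min())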
@register_plot
def click_count_step_5_forecast_volume(func_name):
d = stats.data.get_merged_old(600036, 'date', 'volume', 'readCount')
volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
data = pandas.DataFrame({
'volume': volume,
'clickCount': click_count
})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag = model.select_order(verbose=False)['hqic']
length = data.values.shape[0]
results = model.fit(ic='hqic')
prediction = [0] * (lag)
for j in range(lag, length, 5):
prediction.extend(
map(lambda x: x[1],
results.forecast(data.values[j - lag: j], 5)))
prediction = prediction[:length]
cnt = 0
for j in range(lag, length):
diff = prediction[j] - volume[j]
cnt += diff ** 2
print(math.sqrt(cnt / (length - lag)) / (max(volume) - min(volume)))
fig, ax = plt.subplots()
ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
ax.plot(dates, volume, 'k-', label='Real')
ax.plot(dates, prediction, 'k--', label='Prediction')
ax.set_ylabel('Volume')
ax.set_xlabel('Date')
ax.grid()
ax.legend(loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_volume_sliding_ratio_line(func_name):
d = stats.data.get_merged_old(600036, 'date', 'volume', 'readCount')
window_size = 7
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
raw_volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
volume = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(raw_volume, window_size)))
click_count = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(click_count, window_size)))
fig, ax0 = plt.subplots()
ax1 = ax0.twinx()
lines = []
ax0.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
lines += ax0.plot(dates, volume, 'k-', label='Volume ratio')
ax0.set_xlabel('Date')
ax0.set_ylabel('Volume')
lines += ax1.plot(dates, click_count, 'k:', label='Click count ratio')
ax1.set_ylabel('Click count')
labels = [i.get_label() for i in lines]
ax0.grid()
ax0.legend(lines, labels, loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_forecast_volume_sliding_ratio(func_name):
d = stats.data.get_merged_old(600036, 'date', 'volume', 'readCount')
window_size = 7
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
raw_volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
volume = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(raw_volume, window_size)))
click_count = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(click_count, window_size)))
data = pandas.DataFrame({
'volume': volume,
'clickCount': click_count
})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag = model.select_order(verbose=False)['hqic']
length = data.values.shape[0]
results = model.fit(ic='hqic')
prediction = [0] * (lag)
for j in range(lag, length):
prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
cnt = 0
for j in range(lag, length):
diff = prediction[j] - volume[j]
cnt += diff ** 2
print(math.sqrt(cnt / (length - lag)) / (max(volume) - min(volume)))
fig, ax = plt.subplots()
ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
ax.plot(dates, volume, 'k-', label='Real')
ax.plot(dates, prediction, 'k--', label='Prediction')
ax.set_ylabel('Volume ratio')
ax.set_xlabel('Date')
ax.grid()
ax.legend(loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_forecast_volume_sliding_ratio_compare_window_size(func_name):
index = 600036
for window_size in range(2, 11):
d = stats.data.get_merged_old(index, 'date', 'volume', 'readCount')
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
raw_volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
volume = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(raw_volume, window_size)))
click_count = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(click_count, window_size)))
data = pandas.DataFrame({
'volume': volume,
'clickCount': click_count
})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag = model.select_order(verbose=False)['hqic']
length = data.values.shape[0]
results = model.fit(ic='hqic')
prediction = [0] * (lag)
if lag == 0:
continue
for j in range(lag, length):
prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
cnt = 0
for j in range(lag, length):
diff = prediction[j] - volume[j]
cnt += diff ** 2
print('{} {}: {}'.format(index, window_size,
math.sqrt(cnt / (length - lag)) / (max(volume) - min(volume))))
@register_plot
def click_count_forecast_volume_sliding_ratio_compare_window_size_on_sse_50(func_name):
for index in stats.data.sse_indices():
arr = []
for window_size in range(2, 11):
d = stats.data.get_merged_old(index, 'date', 'volume', 'readCount')
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
raw_volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
volume = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(raw_volume, window_size)))
click_count = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(click_count, window_size)))
data = pandas.DataFrame({
'volume': volume,
'clickCount': click_count
})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag = model.select_order(verbose=False)['hqic']
length = data.values.shape[0]
results = model.fit(ic='hqic')
prediction = [0] * (lag)
if lag == 0:
continue
for j in range(lag, length):
prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
cnt = 0
for j in range(lag, length):
diff = prediction[j] - volume[j]
cnt += diff ** 2
nrmse = math.sqrt(cnt / (length - lag)) / (max(volume) - min(volume))
arr.append((window_size, nrmse))
print('{}: {}'.format(index, min(arr, key=lambda x: x[1])))
@register_plot
def sliding_ratio_window_selection_on_sse_50(func_name):
window_sizes = []
for index in stats.data.sse_indices():
arr = []
for window_size in range(2, 11):
d = stats.data.get_merged_old(index, 'date', 'volume', 'readCount')
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
raw_volume = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
volume = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(raw_volume, window_size)))
click_count = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(click_count, window_size)))
data = pandas.DataFrame({
'volume': volume,
'clickCount': click_count
})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag = model.select_order(verbose=False)['hqic']
length = data.values.shape[0]
results = model.fit(ic='hqic')
prediction = [0] * (lag)
if lag == 0:
continue
for j in range(lag, length):
prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
cnt = 0
for j in range(lag, length):
diff = prediction[j] - volume[j]
cnt += diff ** 2
nrmse = math.sqrt(cnt / (length - lag)) / (max(volume) - min(volume))
arr.append((window_size, nrmse))
window_sizes.append(min(arr, key=lambda x: x[1])[0])
fig, ax = plt.subplots()
index = np.arange(len(window_sizes))
bar_width = 0.8
plt.bar(index, np.asarray(window_sizes), bar_width, color='w', label='Window size')
plt.xlabel('Stock')
plt.ylabel('Window size')
plt.legend(loc=0)
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_price_line(func_name):
d = stats.data.get_merged_old('600000', 'date', 'close', 'readCount')
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
price = d[:, 1]
click_count = d[:, 2]
lines = []
fig, ax0 = plt.subplots()
ax1 = ax0.twinx()
ax0.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
lines += ax0.plot(dates, price, 'k-', label='Price')
ax0.set_xlabel('Date')
ax0.set_ylabel('Price')
lines += ax1.plot(dates, click_count, 'k:', label='Click count')
ax1.set_ylabel('Click count')
labels = [i.get_label() for i in lines]
ax0.grid()
ax0.legend(lines, labels, loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def click_count_price_lag_selection(func_name):
d = stats.data.get_merged_old('600000', 'date', 'close', 'readCount')
close = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
data = pandas.DataFrame({'close': close,
'clickCount': click_count})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
with Capturing() as output:
model.select_order()
print('\n'.join(output))
@register_plot
def click_count_price_granger_causality_test_result(func_name):
d = stats.data.get_merged_old('600000', 'close', 'readCount')
with Capturing() as output:
statsmodels.tsa.api.stattools.\
grangercausalitytests(d, 5, verbose=True)
print('\n'.join(output))
@register_plot
def click_count_price_granger_causality_test_on_sse_50(func_name):
results = []
tests = [
('ssr_ftest', 'SSR $F$ test'),
('params_ftest', 'Params $F$ test'),
('lrtest', 'LR test'),
('ssr_chi2test', 'SSR $\chi^{2}$ test'),
]
for index in stats.data.sse_indices():
d = stats.data.get_merged_old(index, 'date', 'close', 'readCount')
price = d[:, 1].astype(float)
click_count = d[:, 2].astype(float)
data = pandas.DataFrame({
'price': price,
'clickCount': click_count})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag_order = model.select_order(verbose=False)
lag = lag_order['hqic']
res = statsmodels.tsa.api.stattools.\
grangercausalitytests(d[:, 1:], lag, verbose=False)
cur = []
for i in tests:
cur.append(res[lag][0][i[0]][1])
results.append(cur)
fig, ax = plt.subplots()
index = np.arange(len(results))
bar_width = 0.8
for i in range(len(tests)):
plt.bar(index, np.asarray(results)[:, i].flatten(), bar_width, color='w', label=tests[i][1])
plt.xlabel('Stock')
plt.ylabel('$p$ value')
plt.legend(loc=0)
plt.savefig('thesis/plots/{}_{}.pdf'.format(func_name, tests[i][0]))
plt.clf()
@register_plot
def positive_click_count_price_line(func_name):
d = stats.data.get_merged_old('600000', 'date', 'close', 'readCount')
ds = stats.data.get_merged('600000', 'positiveCount', 'negativeCount')
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
price = d[:, 1]
click_count = np.multiply(
ds[:, 0].astype(float) / (ds[:, 0] + ds[:, 1]).astype(float),
d[:, 2].astype(float))
lines = []
fig, ax0 = plt.subplots()
ax1 = ax0.twinx()
ax0.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
lines += ax0.plot(dates, price, 'k-', label='Price')
ax0.set_xlabel('Date')
ax0.set_ylabel('Price')
lines += ax1.plot(dates, click_count, 'k:', label='Click count')
ax1.set_ylabel('Click count')
labels = [i.get_label() for i in lines]
ax0.grid()
ax0.legend(lines, labels, loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
@register_plot
def positive_click_count_price_lag_selection(func_name):
d = stats.data.get_merged_old('600000', 'date', 'close', 'readCount')
ds = stats.data.get_merged('600000', 'positiveCount', 'negativeCount')
close = d[:, 1].astype(float)
click_count = np.multiply(ds[:, 0].astype(float) / (ds[:, 0] + ds[:, 1]).astype(float), d[:, 2].astype(float))
data = pandas.DataFrame({'close': close,
'clickCount': click_count})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
with Capturing() as output:
model.select_order()
print('\n'.join(output))
@register_plot
def positive_click_count_price_granger_causality_test_result(func_name):
d = stats.data.get_merged_old('600000', 'close', 'readCount')
ds = stats.data.get_merged('600000', 'positiveCount', 'negativeCount')
click_count = np.multiply(ds[:, 0].astype(float) / (ds[:, 0] + ds[:, 1]).astype(float), d[:, 1].astype(float))
data = np.concatenate((np.expand_dims(d[:, 0], 1), np.expand_dims(click_count, 1)), axis=1)
with Capturing() as output:
statsmodels.tsa.api.stattools.\
grangercausalitytests(data, 5, verbose=True)
print('\n'.join(output))
@register_plot
def positive_click_count_price_granger_causality_test_on_sse_50(func_name):
results = []
tests = [
('ssr_ftest', 'SSR $F$ test'),
('params_ftest', 'Params $F$ test'),
('lrtest', 'LR test'),
('ssr_chi2test', 'SSR $\chi^{2}$ test'),
]
for index in stats.data.sse_indices():
d = stats.data.get_merged_old(index, 'date', 'close', 'readCount')
ds = stats.data.get_merged(index, 'positiveCount', 'negativeCount')
price = d[:, 1].astype(float)
click_count = np.multiply(ds[:, 0].astype(float) / (ds[:, 0] + ds[:, 1]).astype(float), d[:, 2].astype(float))
data = pandas.DataFrame({
'price': price,
'clickCount': click_count})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag_order = model.select_order(verbose=False)
lag = lag_order['hqic']
res = statsmodels.tsa.api.stattools.\
grangercausalitytests(d[:, 1:], lag, verbose=False)
cur = []
for i in tests:
cur.append(res[lag][0][i[0]][1])
results.append(cur)
fig, ax = plt.subplots()
index = np.arange(len(results))
bar_width = 0.8
for i in range(len(tests)):
plt.bar(index, np.asarray(results)[:, i].flatten(), bar_width, color='w', label=tests[i][1])
plt.xlabel('Stock')
plt.ylabel('$p$ value')
plt.legend(loc=0)
plt.savefig('thesis/plots/{}_{}.pdf'.format(func_name, tests[i][0]))
plt.clf()
@register_plot
def positive_click_count_forecast_price(func_name):
d = stats.data.get_merged_old(600036, 'date', 'close', 'readCount')
ds = stats.data.get_merged(600036, 'positiveCount', 'negativeCount')
price = d[:, 1].astype(float)
click_count = np.multiply(ds[:, 0].astype(float) /
(ds[:, 0] + ds[:, 1]).astype(float), d[:, 2].astype(float))
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
data = pandas.DataFrame({
'price': price,
'clickCount': click_count
})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag = model.select_order(verbose=False)['hqic']
length = data.values.shape[0]
results = model.fit(ic='hqic')
prediction = [0] * (lag)
for j in range(lag, length):
prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
cnt = 0
for j in range(lag, length):
diff = prediction[j] - price[j]
cnt += diff ** 2
print(math.sqrt(cnt / (length - lag)) / (max(price) - min(price)))
fig, ax = plt.subplots()
ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
fig.autofmt_xdate()
dates = dates[lag:]
prediction = prediction[lag:]
price = price[lag:]
ax.plot(dates, price, 'k-', label='Real')
ax.plot(dates, prediction, 'k--', label='Prediction')
ax.set_ylabel('Price')
ax.set_xlabel('Date')
ax.grid()
ax.legend(loc=0)
plt.tight_layout()
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
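# Hedged aside (not part of the original script, and it assumes the `math` import
# already used above): the figure printed in the function above is a normalised RMSE,
# i.e. the root mean squared forecast error divided by the range of the observed
# prices. A minimal standalone sketch of the same formula:
def _nrmse_sketch(predicted, observed):
    """Root mean squared error of predicted vs observed, scaled by the observed range."""
    errors = [(p - o) ** 2 for p, o in zip(predicted, observed)]
    rmse = math.sqrt(sum(errors) / len(errors))
    return rmse / (max(observed) - min(observed))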
@register_plot
def positive_click_count_forecast_price_on_sse_50(func_name):
res = []
for index in stats.data.sse_indices():
d = stats.data.get_merged_old(index, 'date', 'close', 'readCount')
ds = stats.data.get_merged(index, 'positiveCount', 'negativeCount')
price = d[:, 1].astype(float)
click_count = np.multiply(ds[:, 0].astype(float) /
(ds[:, 0] + ds[:, 1]).astype(float), d[:, 2].astype(float))
dates = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in d[:, 0]]
data = pandas.DataFrame({
'price': price,
'clickCount': click_count
})
data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
model = statsmodels.tsa.api.VAR(data)
lag = model.select_order(verbose=False)['hqic']
length = data.values.shape[0]
results = model.fit(ic='hqic')
prediction = [0] * (lag)
for j in range(lag, length):
prediction.append(results.forecast(
data.values[j - lag: j], 1)[0][1])
cnt = 0
for j in range(lag, length):
diff = prediction[j] - price[j]
cnt += diff ** 2
res.append(math.sqrt(cnt / (length - lag)) / (max(price) - min(price)))
fig, ax = plt.subplots()
index = np.arange(len(res))
bar_width = 0.8
plt.bar(index, np.asarray(res), bar_width, color='w', label='NRMSE')
plt.xlabel('Stock')
plt.ylabel('NRMSE')
plt.legend(loc=0)
plt.savefig('thesis/plots/{}.pdf'.format(func_name))
if __name__ == '__main__':
for i in all_plots:
i()
|
mit
|
StratsOn/zipline
|
zipline/examples/dual_ema_talib.py
|
3
|
3149
|
#!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual Moving Average Crossover algorithm.
This algorithm buys Apple (AAPL) once its short moving average crosses
its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
# Import exponential moving average from talib wrapper
from zipline.transforms.ta import EMA
def initialize(context):
context.security = symbol('AAPL')
# Add 2 mavg transforms, one with a long window, one with a short window.
context.short_ema_trans = EMA(timeperiod=20)
context.long_ema_trans = EMA(timeperiod=40)
# To keep track of whether we invested in the stock or not
context.invested = False
def handle_data(context, data):
short_ema = context.short_ema_trans.handle_data(data)
long_ema = context.long_ema_trans.handle_data(data)
if short_ema is None or long_ema is None:
return
buy = False
sell = False
if (short_ema > long_ema).all() and not context.invested:
order(context.security, 100)
context.invested = True
buy = True
elif (short_ema < long_ema).all() and context.invested:
order(context.security, -100)
context.invested = False
sell = True
record(AAPL=data[context.security].price,
short_ema=short_ema[context.security],
long_ema=long_ema[context.security],
buy=buy,
sell=sell)
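# Hedged aside (not part of the original example): the EMA transform used above can be
# sketched in plain Python with the usual recursion and alpha = 2 / (timeperiod + 1).
# This is only an illustration of the idea, not zipline's or TA-Lib's actual
# implementation (TA-Lib seeds the recursion differently).
def _ema_sketch(prices, timeperiod=20):
    alpha = 2.0 / (timeperiod + 1)
    ema = prices[0]
    for price in prices[1:]:
        ema = alpha * price + (1.0 - alpha) * ema
    return ema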
if __name__ == '__main__':
from datetime import datetime
import matplotlib.pyplot as plt
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.api import order, record, symbol
from zipline.utils.factory import load_from_yahoo
start = datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2014, 11, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
results = algo.run(data).dropna()
fig = plt.figure()
ax1 = fig.add_subplot(211, ylabel='portfolio value')
results.portfolio_value.plot(ax=ax1)
ax2 = fig.add_subplot(212)
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
'^', markersize=10, color='m')
ax2.plot(results.ix[results.sell].index, results.short_ema[results.sell],
'v', markersize=10, color='k')
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
plt.show()
|
apache-2.0
|
billy-inn/scikit-learn
|
examples/svm/plot_svm_regression.py
|
249
|
1451
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
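# Hedged aside (not in the original example): each fitted SVR also exposes an R^2
# score through scikit-learn's regressor API, which gives a quick numeric comparison
# of the three kernels before plotting.
for name, model in (('RBF', svr_rbf), ('Linear', svr_lin), ('Polynomial', svr_poly)):
    print('%s kernel R^2 on the training data: %.3f' % (name, model.score(X, y)))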
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
|
bsd-3-clause
|
hippke/TTV-TDV-exomoons
|
create_figures/system_15.py
|
1
|
7440
|
"""n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
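# Hedged sanity check (not part of the original script): the force law in
# Body.attraction is Newton's F = G * m1 * m2 / d**2. For the Sun-Earth pair
# (m1 ~ 1.99e30 kg, m2 ~ 5.97e24 kg, d ~ 1.496e11 m) it gives F ~ 3.5e22 N,
# the standard textbook value, which is a quick way to validate the SI units used here.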
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
# Move resulting data from lists to numpy arrays
ttv_array = numpy.array([])
ttv_array = ttv_list
tdv_array = numpy.array([])
tdv_array = tdv_list
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print transit_duration
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = 0.4218 * 10**9
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = 0.48945554 * 10**9
thirdmoon = Body()
thirdmoon.mass = M_gan
thirdmoon.px = 0.77696224 * 10**9
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
thirdmoon.vy = math.sqrt(G * planet.mass * (2 / thirdmoon.px - 1 / thirdmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
gravity_thirdmoon = (thirdmoon.mass / planet.mass) * thirdmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon + gravity_thirdmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *thirdmoon.px ** 3) / (G * (thirdmoon.mass + planet.mass)))
orbit_duration = orbit_duration * 2.005
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon, thirdmoon])
# Output information
print 'TTV amplitude =', numpy.amax(ttv_array), \
'[min] = ', numpy.amax(ttv_array) * 60, '[sec]'
print 'TDV amplitude =', numpy.amax(tdv_array), \
'[min] = ', numpy.amax(tdv_array) * 60, '[sec]'
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.15, +0.15)
plt.ylim(-0.65, +0.65)
plt.annotate(r"5:4:2", xy=(-0.145, +0.55), size=16)
plt.savefig("fig_system_15.eps", bbox_inches = 'tight')
|
mit
|
CaymanUnterborn/burnman
|
examples/example_averaging.py
|
4
|
7467
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_averaging
-----------------
This example shows the effect of different averaging schemes. Currently four
averaging schemes are available:
1. Voigt-Reuss-Hill
2. Voigt averaging
3. Reuss averaging
4. Hashin-Shtrikman averaging
See :cite:`Watt1976` Journal of Geophysics and Space Physics for explanations
of each averaging scheme.
*Specifically uses:*
* :class:`burnman.averaging_schemes.VoigtReussHill`
* :class:`burnman.averaging_schemes.Voigt`
* :class:`burnman.averaging_schemes.Reuss`
* :class:`burnman.averaging_schemes.HashinShtrikmanUpper`
* :class:`burnman.averaging_schemes.HashinShtrikmanLower`
*Demonstrates:*
* implemented averaging schemes
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
import burnman
from burnman import minerals
if __name__ == "__main__":
""" choose 'slb2' (finite-strain 2nd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'slb3 (finite-strain 3rd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'mgd3' (mie-gruneisen-debeye 3rd order shear modulus,
matas et al. 2007)
or 'mgd2' (mie-gruneisen-debeye 2nd order shear modulus,
matas et al. 2007)
or 'bm2' (birch-murnaghan 2nd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))
or 'bm3' (birch-murnaghan 3rd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))"""
amount_perovskite = 0.6
rock = burnman.Composite([minerals.SLB_2011.mg_perovskite(),
minerals.SLB_2011.periclase()],
[amount_perovskite, 1.0 - amount_perovskite])
perovskitite = minerals.SLB_2011.mg_perovskite()
periclasite = minerals.SLB_2011.periclase()
# seismic model for comparison:
# pick from .prem() .slow() .fast() (see burnman/seismic.py)
seismic_model = burnman.seismic.PREM()
# set on how many depth slices the computations should be done
number_of_points = 20
# we will do our computation and comparison at the following depth values:
depths = np.linspace(700e3, 2800e3, number_of_points)
# alternatively, we could use the values where prem is defined:
# depths = seismic_model.internal_depth_list(mindepth=700.e3,
# maxdepth=2800.e3)
pressures, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate(
['pressure', 'density', 'v_p', 'v_s', 'v_phi'], depths)
temperatures = burnman.geotherm.brown_shankland(depths)
print("Calculations are done for:")
rock.debug_print()
# calculate the seismic velocities of the rock using a whole battery of
# averaging schemes:
# do the end members, here averaging scheme does not matter (though it
# defaults to Voigt-Reuss-Hill)
model_pv = burnman.Model(
perovskitite, pressures, temperatures, burnman.averaging_schemes.VoigtReussHill())
model_fp = burnman.Model(
periclasite, pressures, temperatures, burnman.averaging_schemes.VoigtReussHill())
# Voigt Reuss Hill / Voigt / Reuss averaging
model_vrh = burnman.Model(
rock, pressures, temperatures, burnman.averaging_schemes.VoigtReussHill())
model_v = burnman.Model(
rock, pressures, temperatures, burnman.averaging_schemes.Voigt())
model_r = burnman.Model(
rock, pressures, temperatures, burnman.averaging_schemes.Reuss())
# Upper/lower bound for Hashin-Shtrikman averaging
model_hsu = burnman.Model(
rock, pressures, temperatures, burnman.averaging_schemes.HashinShtrikmanUpper())
model_hsl = burnman.Model(
rock, pressures, temperatures, burnman.averaging_schemes.HashinShtrikmanLower())
# PLOTTING
# plot vs
fig = plt.figure()
plt.plot(
pressures / 1.e9, model_v.v_s() / 1.e3, color='c', linestyle='-', marker='^',
markersize=4, label='Voigt')
plt.plot(
pressures / 1.e9, model_r.v_s() / 1.e3, color='k', linestyle='-', marker='v',
markersize=4, label='Reuss')
plt.plot(
pressures / 1.e9, model_vrh.v_s() / 1.e3, color='b', linestyle='-', marker='x',
markersize=4, label='Voigt-Reuss-Hill')
plt.plot(
pressures / 1.e9, model_hsu.v_s() / 1.e3, color='r', linestyle='-', marker='x',
markersize=4, label='Hashin-Shtrikman')
plt.plot(
pressures / 1.e9, model_hsl.v_s() / 1.e3, color='r', linestyle='-', marker='x',
markersize=4)
plt.plot(
pressures / 1.e9, model_pv.v_s() / 1.e3, color='y', linestyle='-', marker='x',
markersize=4, label='Mg Perovskite')
plt.plot(
pressures / 1.e9, model_fp.v_s() / 1.e3, color='g', linestyle='-', marker='x',
markersize=4, label='Periclase')
plt.xlim(min(pressures) / 1.e9, max(pressures) / 1.e9)
plt.legend(loc='upper left', prop={'size': 11}, frameon=False)
plt.xlabel('pressure (GPa)')
plt.ylabel('Vs (km/s)')
vs_pv_norm = (model_pv.v_s() - model_fp.v_s()) / \
(model_pv.v_s() - model_fp.v_s())
vs_fp_norm = (model_fp.v_s() - model_fp.v_s()) / \
(model_pv.v_s() - model_fp.v_s())
vs_vrh_norm = (model_vrh.v_s() - model_fp.v_s()) / \
(model_pv.v_s() - model_fp.v_s())
vs_v_norm = (model_v.v_s() - model_fp.v_s()) / \
(model_pv.v_s() - model_fp.v_s())
vs_r_norm = (model_r.v_s() - model_fp.v_s()) / \
(model_pv.v_s() - model_fp.v_s())
vs_hsu_norm = (model_hsu.v_s() - model_fp.v_s()) / \
(model_pv.v_s() - model_fp.v_s())
vs_hsl_norm = (model_hsl.v_s() - model_fp.v_s()) / \
(model_pv.v_s() - model_fp.v_s())
ax = fig.add_axes([0.58, 0.18, 0.3, 0.3])
plt.plot(pressures / 1.e9, vs_v_norm, color='c', linestyle='-', marker='^',
markersize=4, label='Voigt')
plt.plot(pressures / 1.e9, vs_r_norm, color='k', linestyle='-', marker='v',
markersize=4, label='Reuss')
plt.plot(
pressures / 1.e9, vs_vrh_norm, color='b', linestyle='-', marker='x',
markersize=4, label='Voigt-Reuss-Hill')
plt.plot(
pressures / 1.e9, vs_hsl_norm, color='r', linestyle='-', marker='x',
markersize=4, label='Hashin-Shtrikman')
plt.plot(
pressures / 1.e9, vs_hsu_norm, color='r', linestyle='-', marker='x',
markersize=4)
plt.plot(
pressures / 1.e9, vs_pv_norm, color='y', linestyle='-', marker='x',
markersize=4, label='Mg Perovskite')
plt.plot(
pressures / 1.e9, vs_fp_norm, color='g', linestyle='-', marker='x',
markersize=4, label='Periclase')
ax.tick_params(labelsize=10)
plt.title("normalized by mixture endmembers", fontsize=10)
plt.xlim(min(pressures) / 1.e9, max(pressures) / 1.e9)
plt.ylim(-0.005, 1.005)
plt.xlabel('pressure (GPa)', fontsize=10)
plt.ylabel('normalized Vs', fontsize=10)
# plt.legend(loc='lower right')
plt.savefig("output_figures/example_averaging_normalized.png")
plt.show()
|
gpl-2.0
|
ZenDevelopmentSystems/scikit-learn
|
sklearn/ensemble/tests/test_weight_boosting.py
|
83
|
17276
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
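# Hedged illustration (not part of the original test-suite): the internal weighted
# resampling that AdaBoost.R2 relies on can be mimicked with a plain numpy bootstrap
# draw over the toy X defined at the top of this file; the names below are local to
# this sketch only and carry no meaning for the tests.
_rng_sketch = np.random.RandomState(0)
_weights_sketch = np.full(len(X), 1.0 / len(X))
_bootstrap_idx_sketch = _rng_sketch.choice(len(X), size=len(X), replace=True,
                                           p=_weights_sketch)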
|
bsd-3-clause
|
julienmalard/Tinamit
|
tinamit/datos/fuente.py
|
1
|
9434
|
import csv
import datetime as ft
import os
import numpy as np
import pandas as pd
from tinamit.config import _
from tinamit.cositas import detectar_codif
from எண்ணிக்கை import எண்ணுக்கு as எ
class Fuente(object):
"""
La clase pariente para fuentes de datos.
"""
def __init__(símismo, nombre, variables, lugares=None, fechas=None):
símismo.nombre = nombre
símismo.variables = [vr for vr in variables if vr not in [lugares, fechas]]
símismo._equiv_nombres = {}
símismo.n_obs = símismo._vec_var(símismo.variables[0], tx=True).size
símismo.lugares = símismo._obt_lugar(lugares)
símismo.fechas = símismo._obt_fecha(fechas)
def obt_vals(símismo, vars_interés, lugares=None, fechas=None):
vars_interés = vars_interés or símismo.variables
if isinstance(vars_interés, str):
vars_interés = [vars_interés]
coords = {}
if símismo.fechas is not None:
coords[_('fecha')] = símismo.fechas
if símismo.lugares is not None:
coords[_('lugar')] = símismo.lugares
vals = pd.DataFrame({
**{vr: símismo._vec_var(símismo._resolver_nombre(vr)) for vr in vars_interés if vr in símismo.variables},
**coords
})
vals = símismo._filtrar_lugares(símismo._filtrar_fechas(vals, fechas), lugares)
return vals.set_index(list(coords))
def _obt_lugar(símismo, lugares):
lugares = lugares or ''
if isinstance(lugares, str):
try:
lugares = símismo._vec_var(lugares, tx=True)
except KeyError:
return np.full(símismo.n_obs, lugares)
return lugares
def _obt_fecha(símismo, fechas):
if isinstance(fechas, str):
try:
fechas = símismo._vec_var(fechas, tx=True)
except KeyError:
pass
fechas = pd.to_datetime(fechas)
if isinstance(fechas, pd.Timestamp):
fechas = pd.to_datetime(np.full(símismo.n_obs, fechas.to_datetime64()))
elif isinstance(fechas, (ft.date, ft.datetime)):
fechas = pd.to_datetime(np.full(símismo.n_obs, fechas).astype(ft.datetime))
return fechas
def equiv_nombre(símismo, var, equiv):
símismo._equiv_nombres[equiv] = var
def _resolver_nombre(símismo, var):
try:
            return símismo._equiv_nombres[var]
except KeyError:
return var
def _vec_var(símismo, var, tx=False):
"""
Devuelve un vector de los valores de un variable.
Parameters
----------
var: str
El nombre del variable.
tx: bool
Si quieres formato de texto o numérico.
Returns
-------
np.ndarray
"""
raise NotImplementedError
@staticmethod
def _filtrar_lugares(vals, criteria):
if criteria is None:
return vals
criteria = [criteria] if isinstance(criteria, str) else criteria
return vals[vals[_('lugar')].isin(criteria)]
@staticmethod
def _filtrar_fechas(vals, criteria):
if criteria is None:
return vals
criteria = pd.to_datetime(criteria)
if isinstance(criteria, pd.Timestamp):
criteria = criteria.to_datetime64()
fechas = vals[_('fecha')]
if isinstance(criteria, tuple) and len(criteria) == 2:
cond = np.logical_and(np.less_equal(fechas, criteria[1]), np.greater_equal(fechas, criteria[0]))
else:
cond = fechas.isin(criteria)
        return vals[cond]
def __str__(símismo):
return símismo.nombre
class FuenteCSV(Fuente):
"""
Fuente para archivos ``.csv``.
"""
def __init__(símismo, archivo, nombre=None, lugares=None, fechas=None, cód_vacío=None):
"""
Parameters
----------
archivo: str
El archivo con los datos.
nombre: str
El nombre de la fuente.
lugares: str or np.ndarray or list
Los lugares que corresponden a los datos. Puede se nombre de una columna en el csv, el nombre de un
lugar de cual vienen todos los datos, o una lista de los lugares.
fechas: str or np.ndarray or list or datetime.datetime
Las fechas de los datos.
cód_vacío:
Código para identificar variables que faltan. ``NA`` y ``NaN`` ya están reconocidos.
"""
nombre = nombre or os.path.splitext(os.path.split(archivo)[1])[0]
símismo.archivo = archivo
símismo.codif = detectar_codif(archivo, máx_líneas=1)
cód_vacío = cód_vacío or ['na', 'NA', 'NaN', 'nan', 'NAN', '']
símismo.cód_vacío = [cód_vacío] if isinstance(cód_vacío, (int, float, str)) else cód_vacío
super().__init__(nombre, variables=símismo.obt_vars(), lugares=lugares, fechas=fechas)
def obt_vars(símismo):
with open(símismo.archivo, encoding=símismo.codif) as d:
lector = csv.reader(d)
nombres_cols = next(lector)
return nombres_cols
def _vec_var(símismo, var, tx=False):
l_datos = []
with open(símismo.archivo, encoding=símismo.codif) as d:
lector = csv.DictReader(d)
for n_f, f in enumerate(lector):
val = f[var].strip()
if not tx:
if val in símismo.cód_vacío:
val = np.nan
else:
try:
val = எ(val)
except ValueError:
val = float(val)
l_datos.append(val)
return np.array(l_datos)
class FuenteDic(Fuente):
"""
Fuente de datos en forma de diccionario.
"""
def __init__(símismo, dic, nombre, lugares=None, fechas=None):
"""
Parameters
----------
dic: dict
El diccionario con los datos.
nombre: str
El nombre de la fuente.
lugares: str or np.ndarray or list
Los lugares que corresponden a los datos. Puede se nombre de una llave en el dictionario, el nombre de un
lugar de cual vienen todos los datos, o una lista de los lugares.
fechas: str or np.ndarray or list or datetime.datetime
Las fechas de los datos.
"""
símismo.dic = dic
super().__init__(nombre, variables=list(símismo.dic), lugares=lugares, fechas=fechas)
def _vec_var(símismo, var, tx=False):
return np.array(símismo.dic[var])
class FuenteVarXarray(Fuente):
"""
Fuente para datos en formato de ``DataArray`` de ``xarray``.
"""
def __init__(símismo, obj, nombre, lugares=None, fechas=None):
"""
Parameters
----------
obj: xarray.DataArray
Los datos
nombre: str
El nombre de la fuente.
lugares: str or np.ndarray or list
Los lugares que corresponden a los datos. Puede se nombre de una columna en el ``DataArray``, el nombre de
un lugar de cual vienen todos los datos, o una lista de los lugares.
fechas: str or np.ndarray or list or datetime.datetime
Las fechas de los datos.
"""
símismo.obj = obj
super().__init__(nombre, variables=[símismo.obj.name], lugares=lugares, fechas=fechas)
def _vec_var(símismo, var, tx=False):
return símismo.obj
class FuenteBaseXarray(Fuente):
"""
Fuente para datos en formato de ``Dataset`` de ``xarray``.
"""
def __init__(símismo, obj, nombre, lugares=None, fechas=None):
"""
Parameters
----------
obj: xarray.Dataset
Los datos
nombre: str
El nombre de la fuente.
lugares: str or np.ndarray or list
Los lugares que corresponden a los datos. Puede se nombre de una columna en el ``Dataset``, el nombre de un
lugar de cual vienen todos los datos, o una lista de los lugares.
fechas: str or np.ndarray or list or datetime.datetime
Las fechas de los datos.
"""
símismo.obj = obj
super().__init__(nombre, variables=list(símismo.obj.data_vars), lugares=lugares, fechas=fechas)
def _vec_var(símismo, var, tx=False):
return símismo.obj[var]
class FuentePandas(Fuente):
"""
Fuente para datos en formato de ``DataFrame`` de ``xarray``.
"""
def __init__(símismo, obj, nombre, lugares=None, fechas=None):
"""
Parameters
----------
obj: pd.DataFrame
Los datos
nombre: str
El nombre de la fuente.
lugares: str or np.ndarray or list
Los lugares que corresponden a los datos. Puede se nombre de una columna en el ``Dataset``, el nombre de un
lugar de cual vienen todos los datos, o una lista de los lugares.
fechas: str or np.ndarray or list or datetime.datetime
Las fechas de los datos.
"""
símismo.obj = obj
super().__init__(nombre, variables=list(símismo.obj), lugares=lugares, fechas=fechas)
def _vec_var(símismo, var, tx=False):
return símismo.obj[var]
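if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): the values below are
    # invented placeholders that only illustrate the FuenteDic interface defined above.
    _datos_demo = {
        'lluvia': [3.0, 1.5, 0.0],
        'fecha': ['2000-01-01', '2000-01-02', '2000-01-03'],
        'lugar': ['701', '701', '702'],
    }
    _fuente_demo = FuenteDic(_datos_demo, 'demo', lugares='lugar', fechas='fecha')
    print(_fuente_demo.obt_vals('lluvia'))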
|
gpl-3.0
|
jabl/fdwatch
|
plotfds.py
|
1
|
1497
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
"""
Copyright (c) 2014 Janne Blomqvist
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Plot the number of open file descriptors vs. time. Generate an input
file with the fdwatch "-p" option.
"""
import matplotlib.pyplot as plt
import sys
import numpy as np
d = np.loadtxt(sys.argv[1])
dt = d[:,0] - d[0, 0]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(dt, d[:,1])
plt.xlabel('Time (s)')
plt.ylabel('Open file descriptors')
plt.show()
|
mit
|
adamgreenhall/scikit-learn
|
sklearn/linear_model/tests/test_ransac.py
|
216
|
13290
|
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
assert_raises_regexp(ValueError,
"No inliers.*residual_threshold.*0\.0",
ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
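    # Hedged aside (not from the original test): with inlier ratio w, sample size m
    # and success probability p, the usual formula is
    #     N = ceil(log(1 - p) / log(1 - w**m)).
    # For example w = 0.5, m = 2, p = 0.99 gives log(0.01) / log(0.75) ~= 16.0,
    # which rounds up to the 17 trials asserted a few lines below.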
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
|
bsd-3-clause
|
nan86150/ImageFusion
|
lib/python2.7/site-packages/matplotlib/tests/test_colors.py
|
9
|
9307
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_raises, assert_equal
import numpy as np
from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
def test_colormap_endian():
"""
Github issue #1005: a bug in putmask caused erroneous
mapping of 1.0 when input from a non-native-byteorder
array.
"""
cmap = cm.get_cmap("jet")
# Test under, over, and invalid along with values 0 and 1.
a = [-0.5, 0, 0.5, 1, 1.5, np.nan]
for dt in ["f2", "f4", "f8"]:
anative = np.ma.masked_invalid(np.array(a, dtype=dt))
aforeign = anative.byteswap().newbyteorder()
#print(anative.dtype.isnative, aforeign.dtype.isnative)
assert_array_equal(cmap(anative), cmap(aforeign))
def test_BoundaryNorm():
"""
Github issue #1258: interpolation was failing with numpy
1.7 pre-release.
"""
# TODO: expand this into a more general test of BoundaryNorm.
boundaries = [0, 1.1, 2.2]
vals = [-1, 0, 2, 2.2, 4]
expected = [-1, 0, 2, 3, 3]
# ncolors != len(boundaries) - 1 triggers interpolation
ncolors = len(boundaries)
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
def test_LogNorm():
"""
LogNorm ignored clip, now it has the same
behavior as Normalize, e.g., values > vmax are bigger than 1
without clip, with clip they are 1.
"""
ln = mcolors.LogNorm(clip=True, vmax=5)
assert_array_equal(ln([1, 6]), [0, 1.0])
def test_PowerNorm():
a = np.array([0, 0.5, 1, 1.5], dtype=np.float)
pnorm = mcolors.PowerNorm(1)
norm = mcolors.Normalize()
assert_array_almost_equal(norm(a), pnorm(a))
a = np.array([-0.5, 0, 2, 4, 8], dtype=np.float)
expected = [0, 0, 1/16, 1/4, 1]
pnorm = mcolors.PowerNorm(2, vmin=0, vmax=8)
assert_array_almost_equal(pnorm(a), expected)
assert_equal(pnorm(a[0]), expected[0])
assert_equal(pnorm(a[2]), expected[2])
assert_array_almost_equal(a[1:], pnorm.inverse(pnorm(a))[1:])
# Clip = True
a = np.array([-0.5, 0, 1, 8, 16], dtype=np.float)
expected = [0, 0, 0, 1, 1]
pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=True)
assert_array_almost_equal(pnorm(a), expected)
assert_equal(pnorm(a[0]), expected[0])
assert_equal(pnorm(a[-1]), expected[-1])
# Clip = True at call time
a = np.array([-0.5, 0, 1, 8, 16], dtype=np.float)
expected = [0, 0, 0, 1, 1]
pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=False)
assert_array_almost_equal(pnorm(a, clip=True), expected)
assert_equal(pnorm(a[0], clip=True), expected[0])
assert_equal(pnorm(a[-1], clip=True), expected[-1])
def test_Normalize():
norm = mcolors.Normalize()
vals = np.arange(-10, 10, 1, dtype=np.float)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
def test_SymLogNorm():
"""
Test SymLogNorm behavior
"""
norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2)
vals = np.array([-30, -1, 2, 6], dtype=np.float)
normed_vals = norm(vals)
expected = [0., 0.53980074, 0.826991, 1.02758204]
assert_array_almost_equal(normed_vals, expected)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
# Ensure that specifying vmin returns the same result as above
norm = mcolors.SymLogNorm(3, vmin=-30, vmax=5, linscale=1.2)
normed_vals = norm(vals)
assert_array_almost_equal(normed_vals, expected)
def _inverse_tester(norm_instance, vals):
"""
Checks if the inverse of the given normalization is working.
"""
assert_array_almost_equal(norm_instance.inverse(norm_instance(vals)), vals)
def _scalar_tester(norm_instance, vals):
"""
Checks if scalars and arrays are handled the same way.
Tests only for float.
"""
scalar_result = [norm_instance(float(v)) for v in vals]
assert_array_almost_equal(scalar_result, norm_instance(vals))
def _mask_tester(norm_instance, vals):
"""
Checks mask handling
"""
masked_array = np.ma.array(vals)
masked_array[0] = np.ma.masked
assert_array_equal(masked_array.mask, norm_instance(masked_array).mask)
@image_comparison(baseline_images=['levels_and_colors'],
extensions=['png'])
def test_cmap_and_norm_from_levels_and_colors():
data = np.linspace(-2, 4, 49).reshape(7, 7)
levels = [-1, 2, 2.5, 3]
colors = ['red', 'green', 'blue', 'yellow', 'black']
extend = 'both'
cmap, norm = mcolors.from_levels_and_colors(levels, colors, extend=extend)
ax = plt.axes()
m = plt.pcolormesh(data, cmap=cmap, norm=norm)
plt.colorbar(m)
# Hide the axes labels (but not the colorbar ones, as they are useful)
for lab in ax.get_xticklabels() + ax.get_yticklabels():
lab.set_visible(False)
def test_cmap_and_norm_from_levels_and_colors2():
levels = [-1, 2, 2.5, 3]
colors = ['red', (0, 1, 0), 'blue', (0.5, 0.5, 0.5), (0.0, 0.0, 0.0, 1.0)]
clr = mcolors.colorConverter.to_rgba_array(colors)
bad = (0.1, 0.1, 0.1, 0.1)
no_color = (0.0, 0.0, 0.0, 0.0)
masked_value = 'masked_value'
# Define the test values which are of interest.
# Note: levels are lev[i] <= v < lev[i+1]
tests = [('both', None, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: clr[4],
3.5: clr[4],
masked_value: bad}),
('min', -1, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: no_color,
3.5: no_color,
masked_value: bad}),
('max', -1, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: clr[3],
3.5: clr[3],
masked_value: bad}),
('neither', -2, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: no_color,
3.5: no_color,
masked_value: bad}),
]
for extend, i1, cases in tests:
cmap, norm = mcolors.from_levels_and_colors(levels, colors[0:i1],
extend=extend)
cmap.set_bad(bad)
for d_val, expected_color in cases.items():
if d_val == masked_value:
d_val = np.ma.array([1], mask=True)
else:
d_val = [d_val]
assert_array_equal(expected_color, cmap(norm(d_val))[0],
'With extend={0!r} and data '
'value={1!r}'.format(extend, d_val))
assert_raises(ValueError, mcolors.from_levels_and_colors, levels, colors)
def test_rgb_hsv_round_trip():
for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]:
np.random.seed(0)
tt = np.random.random(a_shape)
assert_array_almost_equal(tt,
mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt)))
assert_array_almost_equal(tt,
mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt)))
@cleanup
def test_autoscale_masked():
# Test for #2336. Previously fully masked data would trigger a ValueError.
data = np.ma.masked_all((12, 20))
plt.pcolor(data)
plt.draw()
def test_colors_no_float():
# Gray must be a string to distinguish 3-4 grays from RGB or RGBA.
def gray_from_float_rgb():
return mcolors.colorConverter.to_rgb(0.4)
def gray_from_float_rgba():
return mcolors.colorConverter.to_rgba(0.4)
assert_raises(ValueError, gray_from_float_rgb)
assert_raises(ValueError, gray_from_float_rgba)
def test_light_source_shading_color_range():
# see also
#http://matplotlib.org/examples/pylab_examples/shading_example.html
from matplotlib.colors import LightSource
from matplotlib.colors import Normalize
refinput = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
norm = Normalize(vmin=0, vmax=50)
ls = LightSource(azdeg=0, altdeg=65)
testoutput = ls.shade(refinput, plt.cm.jet, norm=norm)
refoutput = np.array([
[[0., 0., 0.58912656, 1.],
[0., 0., 0.67825312, 1.],
[0., 0., 0.76737968, 1.],
[0., 0., 0.85650624, 1.]],
[[0., 0., 0.9456328, 1.],
[0., 0., 1., 1.],
[0., 0.04901961, 1., 1.],
[0., 0.12745098, 1., 1.]],
[[0., 0.22156863, 1., 1.],
[0., 0.3, 1., 1.],
[0., 0.37843137, 1., 1.],
[0., 0.45686275, 1., 1.]]
])
assert_array_almost_equal(refoutput, testoutput)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
mit
|
ssaeger/scikit-learn
|
sklearn/utils/deprecation.py
|
77
|
2417
|
import warnings
__all__ = ["deprecated", ]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
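# A minimal usage sketch (illustrative, not part of the original module),
# showing how the optional ``extra`` message is appended to the warning:
#
#   @deprecated("use new_function instead")   # 'new_function' is hypothetical
#   def old_function(): pass
#
#   old_function()
#   # DeprecationWarning: Function old_function is deprecated; use new_function instead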
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
|
bsd-3-clause
|
zhenv5/scikit-learn
|
examples/mixture/plot_gmm_pdf.py
|
284
|
1528
|
"""
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
henrykironde/scikit-learn
|
examples/svm/plot_svm_anova.py
|
250
|
2000
|
"""
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/py-sncosmo/package.py
|
3
|
2130
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PySncosmo(PythonPackage):
"""SNCosmo is a Python library for high-level supernova cosmology
analysis."""
homepage = "http://sncosmo.readthedocs.io/"
url = "https://pypi.io/packages/source/s/sncosmo/sncosmo-1.2.0.tar.gz"
version('1.2.0', '028e6d1dc84ab1c17d2f3b6378b2cb1e')
# Required dependencies
# py-sncosmo binaries are duplicates of those from py-astropy
extends('python', ignore=r'bin/.*')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-astropy', type=('build', 'run'))
# Recommended dependencies
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-iminuit', type=('build', 'run'))
depends_on('py-emcee', type=('build', 'run'))
depends_on('py-nestle', type=('build', 'run'))
|
lgpl-2.1
|
aravart/capomate
|
poolmate/test/test_api.py
|
2
|
1786
|
import numpy as np
import StringIO
from poolmate.teach import Runner, SVMLearner, build_options
from sklearn.datasets import make_classification
# TODO: Unneeded?
class ScikitTextLearner:
def __init__(self, scikit_learner):
self.scikit_learner = scikit_learner
def fit(self, yx):
pass
def loss(self, model):
pass
def make_example():
x, y = make_classification(n_samples=100,
n_features=20,
n_informative=2,
n_redundant=2,
n_clusters_per_class=2,
flip_y=0.01)
z = np.concatenate((np.reshape(y, (len(y), 1)), x), axis=1)
return z
def test_numpy_python_api():
z = make_example()
runner = Runner()
learner = SVMLearner(z)
options = build_options(search_budget=10,
teaching_set_size=2)
best_loss, best_set = runner.run_experiment(z, learner, options)
def test_text_python_api():
z = make_example()
runner = Runner()
learner = SVMLearner(z)
options = build_options(search_budget=10,
teaching_set_size=2)
best_loss, best_set = runner.run_experiment(z, learner, options)
def test_log_stream():
z = make_example()
runner = Runner()
learner = SVMLearner(z)
log = StringIO.StringIO()
options = build_options(search_budget=10,
teaching_set_size=2,
log=log)
best_loss, best_set = runner.run_experiment(z, learner, options)
print best_set, best_loss
# is this exactly like other api?
# no this wrapper isn't taking indices
# can we output and plot performance?
# what about doing it for text?
# document
|
mit
|
3manuek/scikit-learn
|
examples/mixture/plot_gmm_classifier.py
|
250
|
3918
|
"""
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
ah-anssi/SecuML
|
SecuML/core/Classification/Monitoring/ROC.py
|
1
|
3654
|
# SecuML
# Copyright (C) 2016-2018 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy import interp
from sklearn.metrics import roc_curve, auc
import os.path as path
from SecuML.core.Tools import colors_tools
class ROC(object):
def __init__(self, num_folds, conf):
self.mean_tpr = None
self.mean_fpr = np.linspace(0, 1, 100)
self.thresholds = None
self.fig, (self.ax1) = plt.subplots(1, 1)
self.probabilist_model = conf.probabilistModel()
self.num_folds = num_folds
def addFold(self, fold_id, predictions):
if predictions.numInstances() == 0 or sum(predictions.ground_truth) == 0:
return
if self.probabilist_model:
scores = predictions.predicted_proba
else:
scores = predictions.predicted_scores
fpr, tpr, thresholds = roc_curve(predictions.ground_truth, scores)
if self.mean_tpr is None:
self.mean_tpr = interp(self.mean_fpr, fpr, tpr)
else:
self.mean_tpr += interp(self.mean_fpr, fpr, tpr)
self.thresholds = interp(self.mean_fpr, fpr, thresholds)
self.mean_tpr[0] = 0.0
self.thresholds[0] = 1.0
self.thresholds[-1] = 0.0
roc_auc = auc(fpr, tpr)
if self.num_folds > 1:
self.ax1.plot(fpr, tpr, lw=1,
label='ROC fold %d (area = %0.2f)' % (fold_id, roc_auc))
else:
self.ax1.plot(fpr, tpr, lw=3,
color=colors_tools.getLabelColor('all'),
label='ROC (area = %0.2f)' % (roc_auc))
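# Note on the averaging above: each fold's TPR is interpolated onto the shared
# FPR grid self.mean_fpr and accumulated in self.mean_tpr; when more than one
# fold was added, plot() divides by num_folds, i.e. the usual vertical
# averaging of per-fold ROC curves.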
def display(self, directory):
self.plot(path.join(directory, 'ROC.png'))
self.toCsv(path.join(directory, 'ROC.csv'))
def plot(self, output_file):
self.ax1.plot([0, 1], [0, 1], '--', lw=1,
color=(0.6, 0.6, 0.6), label='Luck')
if self.num_folds > 1:
self.mean_tpr /= self.num_folds
self.mean_tpr[-1] = 1.0
mean_auc = auc(self.mean_fpr, self.mean_tpr)
self.ax1.plot(self.mean_fpr, self.mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
self.ax1.set_xlim([-0.05, 1.05])
self.ax1.set_ylim([-0.05, 1.05])
self.ax1.set_xlabel('False Positive Rate')
self.ax1.set_ylabel('True Positive Rate')
self.ax1.set_title('ROC Curve')
self.ax1.legend(loc='lower right')
self.fig.savefig(output_file)
plt.close(self.fig)
def toCsv(self, output_file):
with open(output_file, 'w') as f:
csv_writer = csv.writer(f)
header = ['Threshold', 'False Alarm Rate', 'Detection Rate']
csv_writer.writerow(header)
if self.thresholds is None: # The ROC is not defined
return
for i in range(len(self.mean_fpr)):
row = [self.thresholds[i], self.mean_fpr[i], self.mean_tpr[i]]
csv_writer.writerow(row)
|
gpl-2.0
|
parkhyeonghwa/mtools
|
mtools/test/test_all_import.py
|
7
|
1549
|
from nose.tools import nottest, make_decorator
from functools import wraps
# tools without any external dependencies
from mtools.mlogfilter.mlogfilter import MLogFilterTool
from mtools.mlogvis.mlogvis import MLogVisTool
from mtools.mloginfo.mloginfo import MLogInfoTool
tools = [MLogFilterTool, MLogVisTool, MLogInfoTool]
# mlaunch depends on pymongo
try:
from mtools.mlaunch.mlaunch import MLaunchTool
tools.append(MLaunchTool)
except ImportError:
pass
# mplotqueries depends on matplotlib
try:
from mtools.mplotqueries.mplotqueries import MPlotQueriesTool
tools.append(MPlotQueriesTool)
except ImportError:
pass
def all_tools(fn):
""" This is a decorator for test functions that runs a loop over all command line tool
classes imported above and passes each class to the test function.
To use this decorator, the test function must accept a single parameter. Example:
@all_tools
def test_something(tool_cls):
tool = tool_cls()
# test tool here ...
"""
@wraps(fn) # copies __name__ of the original function, nose requires the name to start with "test_"
def new_func():
for tool in tools:
fn(tool)
return new_func
def test_import_all():
""" Import all tools from mtools module.
The tools that have external dependencies will only be imported if the dependencies are fulfilled.
This test just passes by default because the imports are tested implicitly by loading this file.
"""
pass
|
apache-2.0
|
bikong2/scikit-learn
|
examples/linear_model/lasso_dense_vs_sparse_data.py
|
348
|
1862
|
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
|
bsd-3-clause
|
robbymeals/scikit-learn
|
sklearn/decomposition/tests/test_truncated_svd.py
|
240
|
6055
|
"""Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "array" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert that the first 10 explained variance ratios agree between the 10- and 20-component fits
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
|
bsd-3-clause
|
southerncross/tianchi-monster
|
predict_merge.py
|
1
|
7919
|
# -*- coding: utf-8 -*-
import csv
import sys
import numpy as np
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier
import math
import copy
from sets import Set
if len(sys.argv) < 3:
print "Usage: predict_merge.py (train_date) [...train_date] (predict_date)"
exit(0)
train_date_num = len(sys.argv) - 2
item_cate_map = {}
fp_item_cate_map = open("E:/tianchi_feature_filtered/tianchi_mobile_recommend_train_item.csv", 'r')
fp_item_cate_map.readline()
for line in fp_item_cate_map:
(ii, geo, ci) = line.strip().split(',')
item_cate_map[int(ii)] = int(ci)
user_habit = {}
fp_user_habit = open("E:/tianchi_feature_filtered/hotness/f_user_full.csv", 'r')
for line in fp_user_habit:
habits = map(lambda x:float(x), line.strip().split(','))
user_habit[habits[0]] = habits[1:]
def load_raw_feature(date):
print "dealing date %d" % date
# how many days of features to be involved
feat_opes_range = (1,2)
feat_files = map(lambda r: "feature_limit_%d_%d.csv"%(date, r), feat_opes_range)
feat_files = map(lambda s: "E:/tianchi_feature_filtered/"+s, feat_files)
# number of features: 4 per feature file plus 1 leading slot for the label
FEAT_SIZE = 4*len(feat_files) + 1
default_feat = [0] * FEAT_SIZE
# feature extraction
ui_feature = {}
for i in range(0, len(feat_files)):
print "\r"+feat_files[i],
sys.stdout.flush()
fp = open(feat_files[i], 'r')
for line in fp:
(ui, ii, f1, f2, f3, f4) = map(lambda x:int(x), line.strip().split(','))
if (ui, ii) not in ui_feature:
ui_feature[(ui, ii)] = default_feat[:]
ui_feature[(ui, ii)][i*4 + 1] = f1
ui_feature[(ui, ii)][i*4 + 2] = f2
ui_feature[(ui, ii)][i*4 + 3] = f3
ui_feature[(ui, ii)][i*4 + 4] = f4
fp.close()
print ""
# tag (label) extraction
#print "\ndealing tag"
fp = open("E:/tianchi_feature_filtered/tag_limit_%d.csv"%date, 'r')
for line in fp:
(ui, ii, tag) = map(lambda x:int(x), line.strip().split(','))
if (ui, ii) in ui_feature:
# label is 1 if the item was bought, regardless of how many were bought
ui_feature[(ui, ii)][0] = 1 if tag != 0 else 0
fp.close()
return copy.deepcopy(ui_feature)
def balance_pos_neg(ui_feature, proportion = 1.0):
"""balance positive and negative data
Args:
ui_feature: init data
proportion: neg / pos
"""
#print "balancing positive and negative data"
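# Illustration (not part of the original code): with proportion=1.0 at most
# one negative pair is kept per positive pair seen so far (iteration-order
# dependent); the training loop further below calls balance_pos_neg(..., 12),
# i.e. up to ~12 negatives per positive.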
pos_count = 0
neg_count = 0
ret_feature = {}
for k in ui_feature:
# handle the data skew (class imbalance)
v = ui_feature[k]
if v[0] == 1:
pos_count += 1
elif v[0] == 0:
if neg_count >= pos_count * proportion:
continue
neg_count += 1
ret_feature[k] = v
#print pos_count, neg_count, pos_count + neg_count
return copy.deepcopy(ret_feature)
def add_global_feature(ui_feature, date):
#print "adding global features of date %d" % date
item_hotness = {}
user_behavior = {}
cate_hotness = {}
fp_item_hotness = open("E:/tianchi_feature_filtered/hotness/hotness_item_%d.csv" % date, 'r')
for line in fp_item_hotness:
(ii, f_all, h1, h2, h3, h4) = map(lambda x:float(x), line.strip().split(','))
item_hotness[ii] = [h1, h2, h3, h4]
fp_user_behavior = open("E:/tianchi_feature_filtered/hotness/hotness_user_%d.csv" % date, 'r')
for line in fp_user_behavior:
(ui, f_all, h1, h2, h3, h4) = map(lambda x:float(x), line.strip().split(','))
user_behavior[ui] = [h1, h2, h3, h4]
fp_cate_hotness = open("E:/tianchi_feature_filtered/hotness/hotness_category_%d.csv" % date, 'r')
for line in fp_cate_hotness:
(ci, f_all, h1, h2, h3, h4) = map(lambda x:float(x), line.strip().split(','))
cate_hotness[ci] = [h1, h2, h3, h4]
for k in ui_feature:
v = ui_feature[k]
v += user_behavior[k[0]] if k[0] in user_behavior else [0,0,0,0]
v += item_hotness[k[1]] if k[1] in item_hotness else [0,0,0,0]
v += cate_hotness[item_cate_map[k[1]]] if k[1] in item_cate_map and item_cate_map[k[1]] in cate_hotness else [0,0,0,0]
v += user_habit[k[0]]
def add_u_i_feature(ui_feature, date):
"""user item features from shun, 7 dimensions"""
# disabled: returns immediately, so the feature-loading code below never runs
return
curr_feat = {}
fp_u_i_feature = open("E:/tianchi_feature_filtered/hotness/user_item_%d.csv" % date, 'r')
for line in fp_u_i_feature:
feat_items = map(lambda x:float(x), line.strip().split(','))
curr_feat[tuple(feat_items[:2])] = feat_items[6:]
for k in ui_feature:
v = ui_feature[k]
v += curr_feat[k]
def delete_yesterday_buy(ui_pre, res, date):
print "delete bought items yesterday"
# load the items bought on the previous two days
yes_buy = Set()
with open("E:/tianchi_feature_filtered/tag_limit_%d.csv"%(date-1), 'r') as yes_fp:
for line in yes_fp:
(ui, ii, tag) = map(lambda x:int(x), line.strip().split(','))
if (ui, ii) not in yes_buy:
yes_buy.add((ui, ii))
with open("E:/tianchi_feature_filtered/tag_limit_%d.csv"%(date-2), 'r') as yes_fp:
for line in yes_fp:
(ui, ii, tag) = map(lambda x:int(x), line.strip().split(','))
if (ui, ii) not in yes_buy:
yes_buy.add((ui, ii))
for i in range(len(res)):
if ui_pre[i] in yes_buy:
res[i] = 0
# load training data from multiple days
print "load train features of dates: ", sys.argv[1:train_date_num+1]
ui_train = []
feat_train = []
for di in range(1, train_date_num+1):
date1 = int(sys.argv[di])
# "deal with day %d" % date1
ui_raw_feat_train = load_raw_feature(date1)
ui_feat_train = balance_pos_neg(ui_raw_feat_train, 12)
add_global_feature(ui_feat_train, date1)
add_u_i_feature(ui_feat_train, date1)
for k in ui_feat_train:
ui_train.append(list(k))
feat_train.append(ui_feat_train[k])
print "load finished with entries: ", len(ui_train), len(feat_train)
feat_data = np.array(feat_train)
print "feat dimension: ", len(feat_data[0,1:])
logistic = linear_model.LogisticRegression(penalty='l1')
#logistic = RandomForestClassifier(n_estimators=8, random_state=1)
logistic.fit(feat_data[:, 1:], feat_data[:, 0])
print 'Fit finished'
# "deal with day %d" % date2
date2 = int(sys.argv[-1])
ui_feat_pre = load_raw_feature(date2)
add_global_feature(ui_feat_pre, date2)
add_u_i_feature(ui_feat_pre, date2)
print "One entry of date %d for verification" % date2
for k in ui_feat_pre:
print k, ui_feat_pre[k]
break
ui_pre = []
feat_pre = []
for k in ui_feat_pre:
ui_pre.append(k)
feat_pre.append(ui_feat_pre[k])
feat_new = np.array(feat_pre)
print len(feat_new)
res = logistic.predict(feat_new[:, 1:])
print 'Predict finished'
# remove the pairs that were bought just yesterday (or the day before)
delete_yesterday_buy(ui_pre, res, date2)
if sys.argv[-1] != '32':
# load the ground-truth result set
pos_set = Set()
with open("E:/tianchi_feature_filtered/tag_limit_%d.csv"%date2, 'r') as pos_fp:
for line in pos_fp:
(ui, ii, tag) = map(lambda x:int(x), line.strip().split(','))
if (ui, ii) not in pos_set:
pos_set.add((ui, ii))
count = 0
hit = 0
positive = 0
true_pos = len(pos_set)
for i in range(len(res)):
count += 1
if res[i] != 0 and ui_pre[i] in pos_set:
hit += 1
if res[i] != 0:
positive += 1
precision = float(hit)/positive
recf_all = float(hit)/true_pos
f1 = 2 * precision * recf_all / (precision + recf_all)
print "count=%d, hit=%d, pos=%d, true_pos=%d, precision=%f, recf_all=%f, f1=%f" % (count, hit, positive, true_pos, precision, recf_all, f1)
else:
fw = open("pred_res.csv", "wb")
csvw = csv.writer(fw)
count = 0
for k in ui_pre:
if res[count] != 0:
csvw.writerow(k)
count += 1
|
mit
|
spallavolu/scikit-learn
|
benchmarks/bench_covertype.py
|
120
|
7381
|
"""
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow choosing between Fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
|
bsd-3-clause
|
pthaike/SFrame
|
oss_src/unity/python/sframe/test/test_io.py
|
9
|
15399
|
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import commands
import json
import logging
import os
import re
import tempfile
import unittest
import pandas
import sys
from ..connect import main as glconnect
from .. import sys_util as _sys_util
from .. import util
from .. import SGraph, Model, SFrame, load_graph, load_model, load_sframe
from create_server_util import create_server, start_test_tcp_server
from pandas.util.testing import assert_frame_equal
restricted_place = '/root'
if sys.platform == 'win32':
restricted_place = 'C:/Windows/System32/config/RegBack'
def _test_save_load_object_helper(testcase, obj, url):
"""
Helper function to test save and load a server side object to a given url.
"""
def cleanup(url):
"""
Remove the saved file from temp directory.
"""
protocol = None
path = None
splits = url.split("://")
if len(splits) > 1:
protocol = splits[0]
path = splits[1]
else:
path = url
if not protocol or protocol == "local" or protocol == "remote":
tempdir = tempfile.gettempdir()
pattern = path + ".*"
for f in os.listdir(tempdir):
if re.search(pattern, f):
os.remove(os.path.join(tempdir, f))
if isinstance(obj, SGraph):
obj.save(url + ".graph")
newobj = load_graph(url + ".graph")
testcase.assertItemsEqual(obj.get_fields(), newobj.get_fields())
testcase.assertDictEqual(obj.summary(), newobj.summary())
elif isinstance(obj, Model):
obj.save(url + ".model")
newobj = load_model(url + ".model")
testcase.assertItemsEqual(obj.list_fields(), newobj.list_fields())
testcase.assertEqual(type(obj), type(newobj))
elif isinstance(obj, SFrame):
obj.save(url + ".frame_idx")
newobj = load_sframe(url + ".frame_idx")
testcase.assertEqual(obj.shape, newobj.shape)
testcase.assertEqual(obj.column_names(), newobj.column_names())
testcase.assertEqual(obj.column_types(), newobj.column_types())
assert_frame_equal(obj.head(obj.num_rows()).to_dataframe(),
newobj.head(newobj.num_rows()).to_dataframe())
else:
raise TypeError
cleanup(url)
def create_test_objects():
vertices = pandas.DataFrame({'vid': ['1', '2', '3'],
'color': ['g', 'r', 'b'],
'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})
edges = pandas.DataFrame({'src_id': ['1', '2', '3'],
'dst_id': ['2', '3', '4'],
'weight': [0., 0.1, 1.]})
graph = SGraph().add_vertices(vertices, 'vid').add_edges(edges, 'src_id', 'dst_id')
sframe = SFrame(edges)
return (graph, sframe)
class LocalFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe) = create_test_objects()
def _test_read_write_helper(self, url, content):
url = util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
if os.path.exists(url):
os.remove(url)
def test_object_save_load(self):
for prefix in ['', 'local://', 'remote://']:
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
def test_basic(self):
self._test_read_write_helper(self.tempfile, 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv", 'hello,world,woof')
self._test_read_write_helper("remote://" + self.tempfile + ".csv", 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper(self.tempfile + ".gz", 'hello world')
self._test_read_write_helper(self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello world')
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__(restricted_place+"/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(restricted_place+"/tmp", '.....'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__(restricted_place+"/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(restricted_place+"/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save(restricted_place+"/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(restricted_place+"/tmp.frame_idx"))
self.assertRaises(IOError, lambda: load_graph(restricted_place+"/tmp.graph"))
self.assertRaises(IOError, lambda: load_sframe(restricted_place+"/tmp.frame_idx"))
self.assertRaises(IOError, lambda: load_model(restricted_place+"/tmp.model"))
class RemoteFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
glconnect.stop()
auth_token = 'graphlab_awesome'
self.server = start_test_tcp_server(auth_token=auth_token)
glconnect.launch(self.server.get_server_addr(), auth_token=auth_token)
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe) = create_test_objects()
@classmethod
def tearDownClass(self):
glconnect.stop()
self.server.stop()
def _test_read_write_helper(self, url, content):
url = util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
def test_basic(self):
self._test_read_write_helper("remote://" + self.tempfile, 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello,world,woof')
def test_object_save_load(self):
prefix = "remote://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
@unittest.skipIf(sys.platform == 'win32', "This for some reason doesn't pass on Jenkins")
def test_exception(self):
self.assertRaises(ValueError, lambda: self._test_read_write_helper(self.tempfile, 'hello world'))
self.assertRaises(ValueError, lambda: self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello,world,woof'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote://"+restricted_place+"/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote://"+restricted_place+"/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("remote://"+restricted_place+"/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save("remote://"+restricted_place+"/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save("remote://"+restricted_place+"/tmp.frame_idx"))
self.assertRaises(IOError, lambda: load_graph("remote://"+restricted_place+"/tmp.graph"))
self.assertRaises(IOError, lambda: load_sframe("remote://"+restricted_place+"/tmp.frame_idx"))
self.assertRaises(IOError, lambda: load_model("remote://"+restricted_place+"/tmp.model"))
class HttpConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def _test_read_helper(self, url, content_expected):
url = util._make_internal_url(url)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
def test_read(self):
expected = "\n".join([str(unichr(i + ord('a'))) for i in range(26)])
expected = expected + "\n"
self._test_read_helper(self.url, expected)
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(self.url, '.....'))
@unittest.skip("Disabling HDFS Connector Tests")
class HDFSConnectorTests(unittest.TestCase):
# This test requires hadoop to be installed and available in $PATH.
# If not, the tests will be skipped.
@classmethod
def setUpClass(self):
self.has_hdfs = len(_sys_util.get_hadoop_class_path()) > 0
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe) = create_test_objects()
def _test_read_write_helper(self, url, content_expected):
url = util._make_internal_url(url)
glconnect.get_unity().__write__(url, content_expected)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
# clean up the file we wrote
status, output = commands.getstatusoutput('hadoop fs -test -e ' + url)
if status == 0:
commands.getstatusoutput('hadoop fs -rm ' + url)
def test_basic(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile, 'hello,world,woof')
else:
logging.getLogger(__name__).info("No hdfs available. Test passes.")
def test_gzip(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile + ".gz", 'hello,world,woof')
self._test_read_write_helper("hdfs://" + self.tempfile + ".csv.gz", 'hello,world,woof')
else:
logging.getLogger(__name__).info("No hdfs available. Test passes.")
def test_object_save_load(self):
if self.has_hdfs:
prefix = "hdfs://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
logging.getLogger(__name__).info("No hdfs available. Test passes.")
def test_exception(self):
bad_url = "hdfs:///root/"
if self.has_hdfs:
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs://" + self.tempfile))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(bad_url + "/tmp", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(bad_url + "x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: load_graph(bad_url + "mygraph"))
self.assertRaises(IOError, lambda: load_sframe(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: load_model(bad_url + "x.model"))
else:
logging.getLogger(__name__).info("No hdfs available. Test passes.")
@unittest.skip("Disabling S3 Connector Tests")
class S3ConnectorTests(unittest.TestCase):
# This test requires aws cli to be installed. If not, the tests will be skipped.
@classmethod
def setUpClass(self):
status, output = commands.getstatusoutput('aws s3api list-buckets')
self.has_s3 = (status == 0)
self.standard_bucket = None
self.regional_bucket = None
# Use aws cli s3api to find a bucket with "gl-testdata" in the name, and use it as our test bucket.
# Temp files will be read from / written to the test bucket's /tmp folder and be cleared on exit.
if self.has_s3:
try:
json_output = json.loads(output)
bucket_list = [b['Name'] for b in json_output['Buckets']]
assert 'gl-testdata' in bucket_list
assert 'gl-testdata-oregon' in bucket_list
self.standard_bucket = 'gl-testdata'
self.regional_bucket = 'gl-testdata-oregon'
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe) = create_test_objects()
except:
logging.getLogger(__name__).warning("Failed to parse the output of s3api into JSON. Please check your awscli version.")
self.has_s3 = False
def _test_read_write_helper(self, url, content_expected):
s3url = util._make_internal_url(url)
glconnect.get_unity().__write__(s3url, content_expected)
content_read = glconnect.get_unity().__read__(s3url)
self.assertEquals(content_read, content_expected)
(status, output) = commands.getstatusoutput('aws s3 rm --region us-west-2 ' + url)
if status != 0:
logging.getLogger(__name__).warning("Cannot remove file: " + url)
def test_basic(self):
if self.has_s3:
for bucket in [self.standard_bucket, self.regional_bucket]:
self._test_read_write_helper("s3://" + bucket + self.tempfile, 'hello,world,woof')
else:
logging.getLogger(__name__).info("No s3 bucket available. Test passes.")
def test_gzip(self):
if self.has_s3:
self._test_read_write_helper("s3://" + self.standard_bucket + self.tempfile + ".gz", 'hello,world,woof')
else:
logging.getLogger(__name__).info("No s3 bucket available. Test passes.")
def test_object_save_load(self):
if self.has_s3:
prefix = "s3://" + self.standard_bucket
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
logging.getLogger(__name__).info("No s3 bucket available. Test passes.")
def test_exception(self):
if self.has_s3:
bad_bucket = "i_am_a_bad_bucket"
prefix = "s3://" + bad_bucket
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + self.standard_bucket + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + "/somerandomfile", "somerandomcontent"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + self.standard_bucket + "I'amABadUrl/", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: load_graph(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: load_sframe(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: load_model(prefix + "/x.model"))
else:
logging.getLogger(__name__).info("No s3 bucket available. Test passes.")
|
bsd-3-clause
|
JohanComparat/nbody-npt-functions
|
bin/bin_onePT/extra/mivr-3-completeness.py
|
2
|
7577
|
import glob
import sys
import cPickle
from os.path import join
import numpy as n
import astropy.io.fits as fits
import os
import matplotlib
#matplotlib.use('pdf')
matplotlib.rcParams['font.size']=12
import matplotlib.pyplot as p
from scipy.optimize import minimize
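# Fitting function (Schechter-like cutoff form): f(X) = A * (X/X0)^(-beta) * exp(-(X/X0)^alpha),
# evaluated and returned in log10; every argument is passed as the log10 of the corresponding quantity.
# funG unpacks a parameter vector ps = [lg_A, lg_X0, lg_alpha, lg_beta]; its lg_z argument is accepted but unused here.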
fun = lambda lg_X, lg_A, lg_X0, lg_alpha, lg_beta : n.log10( 10**lg_A * (10**lg_X/10**lg_X0)**(-10**lg_beta) * n.e**(- (10**lg_X/10**lg_X0)**(10**lg_alpha) ) )
funG = lambda lg_X, lg_z, ps : fun( lg_X, ps[0], ps[1], ps[2], ps[3] ) #
NDecimal = 3
dir='..'
qty = 'M200c'
dir_04 = join(dir,"MD_0.4Gpc")
dir_10 = join(dir,"MD_1Gpc")
dir_25 = join(dir,"MD_2.5Gpc")
dir_40 = join(dir,"MD_4Gpc")
dir_25N = join(dir,"MD_2.5GpcNW")
dir_40N = join(dir,"MD_4GpcNW")
data = fits.open( join(dir, qty, "MD_M200c_summary.fits") )[1].data
errorLog = 0.03
NminCount = 0
Npmin = 3
limits_04 = [Npmin*9.63 * 10**7, 5e12]
limits_10 = [Npmin*1.51 * 10**9., 5e13]
limits_25 = [Npmin*2.359 * 10**10., 5e14]
limits_40 = [Npmin* 9.6 * 10**10. , 5e15]
MPART = n.array([9.63 * 10**7, 1.51 * 10**9, 2.359 * 10**10, 9.6 * 10**10])
names = n.array(["SMD", "MDPL", "BigMD", "HMD", "BigMDNW", "HMDNW"])
zmin = -0.001
zmax = 0.001
def GetLim(xx , ratio, xTr , lims):
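    # For each completeness limit in lims, return the largest x below the
    # transition value xTr where the data/model ratio is still under that
    # limit (an estimate of the corresponding completeness threshold).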
lowX = (xx<xTr)
highX = (xx>xTr)
#X_up = n.array([ n.min(xx[(highX)&(ratio<lim)]) for lim in lims ])
X_low = n.array([ n.max(xx[(lowX)&(ratio<lim)]) for lim in lims ])
return X_low # X_up,
def getCompleteness(qty = 'M200c', cos = "cen", zmin = -0.01, zmax = 0.1):
"""
Plots the data to be used in the fits later in the analysis.
"""
# gets the best fitting parameters at redshift 0 :
f=open(join(dir,qty,"M200c-"+cos+"-cumulative-function-z0-params.pkl"), 'r')
res = cPickle.load(f)
f.close()
pOpt = res.x
cov = res.direc
# redshift selection
zSel = (data["redshift"]>zmin)&(data["redshift"]<zmax)
# minimum number counts selection
nSel = (data['dN_counts_'+cos]>NminCount)
#start the figure
p.figure(1,(6,6))
p.axes([0.17,0.17,0.75,0.75])
# mass selection and plots for each box :
if cos == "cen":
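        # One block per simulation box below: select the box and its resolution
        # limit, form the data/model ratio against the z=0 fit, record the
        # completeness limits with GetLim, and plot the ratio.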
mSel = ((data["boxLength"]==400.)&(data["log_"+qty+"_min"]>n.log10(limits_04[0])))
ok = (zSel) & (mSel) & (nSel)
lg_M200c = (data["log_"+qty+"_min"][ok]+data["log_"+qty+"_max"][ok])/2.
lg_MF_c = n.log10(data["dNdVdlnM_"+cos+"_c"][ok])
lg_1pz = n.log10(1+ data["redshift"][ok])
X_low_04 = GetLim(xx = lg_M200c, ratio = 10**(lg_MF_c - funG(lg_M200c,lg_1pz, pOpt)), xTr = 2*limits_04[0], lims = [0.8, 0.9, 0.95, 0.97])
p.plot(lg_M200c[::3], 10**(lg_MF_c[::3] - funG(lg_M200c[::3],lg_1pz[::3], pOpt)), marker ='o', mfc='None',mec='r',ls='none', label="SMD", rasterized=True)
mSel = ((data["boxLength"]==1000.)&(data["log_"+qty+"_min"]>n.log10(limits_10[0])))
ok = (zSel) & (mSel) & (nSel)
lg_M200c = (data["log_"+qty+"_min"][ok]+data["log_"+qty+"_max"][ok])/2.
lg_MF_c = n.log10(data["dNdVdlnM_"+cos+"_c"][ok])
lg_1pz = n.log10(1+ data["redshift"][ok])
X_low_10 = GetLim(xx = lg_M200c, ratio = 10**(lg_MF_c - funG(lg_M200c,lg_1pz, pOpt)), xTr = 2*limits_10[0], lims = [0.8, 0.9, 0.95, 0.97])
p.plot(lg_M200c[::3], 10**(lg_MF_c[::3] - funG(lg_M200c[::3],lg_1pz[::3], pOpt)), marker ='v', mfc='None',mec='c',ls='none', label="MDPL", rasterized=True)
mSel = ((data["boxLength"]==2500.)&(data["log_"+qty+"_min"]>n.log10(limits_25[0])))
ok = (zSel) & (mSel) & (nSel)
lg_M200c = (data["log_"+qty+"_min"][ok]+data["log_"+qty+"_max"][ok])/2.
lg_MF_c = n.log10(data["dNdVdlnM_"+cos+"_c"][ok])
lg_1pz = n.log10(1+ data["redshift"][ok])
X_low_25 = GetLim(xx = lg_M200c, ratio = 10**(lg_MF_c - funG(lg_M200c,lg_1pz, pOpt)), xTr = 2*limits_25[0], lims = [0.8, 0.9, 0.95, 0.97])
p.plot(lg_M200c[::3], 10**(lg_MF_c[::3] - funG(lg_M200c[::3],lg_1pz[::3], pOpt)), marker ='s', mfc='None',mec='m',ls='none', label="BigMD", rasterized=True)
mSel = ((data["boxLength"]==4000.)&(data["log_"+qty+"_min"]>n.log10(limits_40[0])))
ok = (zSel) & (mSel) & (nSel)
lg_M200c = (data["log_"+qty+"_min"][ok]+data["log_"+qty+"_max"][ok])/2.
lg_MF_c = n.log10(data["dNdVdlnM_"+cos+"_c"][ok])
lg_1pz = n.log10(1+ data["redshift"][ok])
X_low_40 = GetLim(xx = lg_M200c, ratio = 10**(lg_MF_c - funG(lg_M200c,lg_1pz, pOpt)), xTr = 2*limits_40[0], lims = [0.8, 0.9, 0.95, 0.97])
p.plot(lg_M200c, 10**(lg_MF_c - funG(lg_M200c,lg_1pz, pOpt)), marker ='+', mfc='None',mec='b',ls='none', label="HMD", rasterized=True)
p.axvline(n.log10(MPART[0]*100.), color='r')
p.axvline(n.log10(MPART[1]*100.), color='c')
p.axvline(n.log10(MPART[2]*100.), color='m')
p.axvline(n.log10(MPART[3]*100.), color='b')
p.xlim((9.5,16))
lims = n.array([X_low_04, X_low_10, X_low_25, X_low_40]) # [X_up_04, X_up_10, X_up_25, X_up_40],
if cos == "sat":
mSel = ((data["boxLength"]==400.)&(data["log_"+qty+"_min"]>n.log10(limits_04[0])))
ok = (zSel) & (mSel) & (nSel)
lg_M200c = (data["log_"+qty+"_min"][ok]+data["log_"+qty+"_max"][ok])/2.
lg_MF_c = n.log10(data["dNdVdlnM_"+cos+"_c"][ok])
lg_1pz = n.log10(1+ data["redshift"][ok])
X_low_04 = GetLim(xx = lg_M200c, ratio = 10**(lg_MF_c - funG(lg_M200c,lg_1pz, pOpt)), xTr = 2*limits_04[0], lims = [0.8, 0.9, 0.95, 0.97])
p.plot(lg_M200c[::3], 10**(lg_MF_c[::3] - funG(lg_M200c[::3],lg_1pz[::3], pOpt)), marker ='o', mfc='None',mec='r',ls='none', label="SMD", rasterized=True)
mSel = ((data["boxLength"]==1000.)&(data["log_"+qty+"_min"]>n.log10(limits_10[0])))
ok = (zSel) & (mSel) & (nSel)
lg_M200c = (data["log_"+qty+"_min"][ok]+data["log_"+qty+"_max"][ok])/2.
lg_MF_c = n.log10(data["dNdVdlnM_"+cos+"_c"][ok])
lg_1pz = n.log10(1+ data["redshift"][ok])
X_low_10 = GetLim(xx = lg_M200c, ratio = 10**(lg_MF_c - funG(lg_M200c,lg_1pz, pOpt)), xTr = 2*limits_10[0], lims = [0.8, 0.9, 0.95, 0.97])
p.plot(lg_M200c[::3], 10**(lg_MF_c[::3] - funG(lg_M200c[::3],lg_1pz[::3], pOpt)), marker ='v', mfc='None',mec='c',ls='none', label="MDPL", rasterized=True)
p.axvline(n.log10(MPART[0]*100.), color='r')
p.axvline(n.log10(MPART[1]*100.), color='c')
p.xlim((9.5,16))
lims = n.array([X_low_04, X_low_10]) # [X_up_04, X_up_10, X_up_25, X_up_40],
p.axhline(1)
p.xlabel(r'log$_{10}[M_{200c}/(h^{-1}M_\odot)]$')
p.ylabel(r' n('+cos+',>M) data / model') # log$_{10}[ n(>M)]')
gl = p.legend(loc=4,fontsize=12)
gl.set_frame_on(False)
p.ylim((.9,1.1))
#p.title(str(n.round(n.mean(redshift),NDecimal))+" "+str(A0[0])+" "+str(vcut0[0])+" "+str(a0[0])+" "+str(b0[0]))
p.grid()
p.savefig(join(dir,qty,"M200c-"+cos+"-completeness.png"))
p.clf()
return lims
limC = getCompleteness(qty = 'M200c', cos = "cen", zmin = -0.01, zmax = 0.01)
limS = getCompleteness(qty = 'M200c', cos = "sat", zmin = -0.01, zmax = 0.01)
f=open( join( dir, qty, "completeness-" + qty + "-npart-z0.txt" ), 'w' )
f.write( " central \n")
for ii, el in enumerate(limC):
f.write( names[ii]+" & " +str(n.round(n.log10(el[0]),NDecimal))+ " ("+str(int(el[0]/MPART[ii]))+ ") & " + str(n.round(n.log10(el[1]),NDecimal))+ " ("+str(int(el[1]/MPART[ii]))+ ") & " + str(n.round(n.log10(el[2]),NDecimal))+ " ("+str(int(el[2]/MPART[ii]))+") & " + str(n.round(n.log10(el[3]),NDecimal))+ " ("+str(int(el[3]/MPART[ii]))+ ") \\\\ \n")
f.write( " sat \n")
for ii, el in enumerate(limS):
f.write( names[ii]+" & " +str(n.round(n.log10(el[0]),NDecimal)) + " ("+str(int(el[0]/MPART[ii]))+ ") & " + str(n.round(n.log10(el[1]),NDecimal)) + " ("+str(int(el[1]/MPART[ii]))+ ") & " + str(n.round(n.log10(el[2]),NDecimal)) + " ("+str(int(el[2]/MPART[ii]))+ ") & " + str(n.round(n.log10(el[3]),NDecimal)) + " ("+str(int(el[3]/MPART[ii]))+ ")\\\\ \n")
f.close()
|
cc0-1.0
|
ArcticSnow/snowpyt
|
snowpyt/JC_pit_class.py
|
3
|
7742
|
from __future__ import division
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
# Include class and function here
# NOTE: the Snowpit base class and the snowflake_dict symbol table used below
# are assumed to be provided elsewhere in the snowpyt package.
class Snowpit_svalbard_JC(Snowpit):
'''
Class for snowpit data as sorted by JC about Snow Svalbard Research
'''
    def __init__(self, filename):
        super(Snowpit_svalbard_JC, self).__init__()
        self.filename = filename
def summary_plot(self, save=False):
'''
plot general snowpit plot
:return:
'''
fig = plt.figure(figsize=(10, 10), dpi=150)
# fig = plt.figure()
ax1 = plt.subplot2grid((4,4),(0,0),rowspan = 3)
ax2 = plt.subplot2grid((4,4),(0,1),rowspan = 3, sharey=ax1) # Share y-axes with subplot 1
ax3 = plt.subplot2grid((4,4),(0,2),rowspan = 3, sharey=ax2)
ax4 = plt.subplot2grid((4,4),(0,3),rowspan = 3, sharey=ax3)
# Set y-ticks of subplot 2 invisible
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax3.get_yticklabels(), visible=False)
plt.setp(ax4.get_yticklabels(), visible=False)
# Plot data
fig.gca().invert_yaxis()
im1 = ax1.plot(-self.temperature_snow, self.temperature_depth)
im2 = ax2.barh(self.layer_top, np.repeat(1, self.layer_top.__len__()), self.layer_bot - self.layer_top,
color=cm.Blues(self.hardness_code / 7))
ax2.set_xlim(0, 1)
# include symbols
for i, flake in enumerate(self.grain_type1.astype(str)):
if flake != 'nan':
im = plt.imread(snowflake_dict.get(flake))
im[im == 0] = np.nan
imagebox = OffsetImage(im, zoom=.02)
if self.grain_type2.astype(str)[i] == 'nan':
hloc = 0.5
else:
hloc = 0.33
xy = [hloc,
((self.layer_top[i] - self.layer_bot[i]) / 2 + self.layer_bot[i])] # coordinates to position this image
ab = AnnotationBbox(imagebox, xy, xycoords='data', boxcoords='data', frameon=False)
ax2.add_artist(ab)
for i, flake in enumerate(self.grain_type2.astype(str)):
if flake != 'nan':
im = plt.imread(snowflake_dict.get(flake))
im[im == 0] = np.nan
imagebox = OffsetImage(im, zoom=.02)
xy = [0.66,
((self.layer_top[i] - self.layer_bot[i]) / 2 + self.layer_bot[i])] # coordinates to position this image
ab = AnnotationBbox(imagebox, xy, xycoords='data', boxcoords='data', frameon=False)
ax2.add_artist(ab)
im3 = ax3.barh(self.layer_top, self.hardness_code, self.layer_bot - self.layer_top, color=cm.Blues(self.hardness_code / 6))
ax3.set_xlim(0, 7)
im4 = ax4.plot(self.density, self.density_depth)
ax4.yaxis.tick_right()
# add
ax1.set_title("Temperature ($^\circ$C)")
ax2.set_title("Stratigraphy")
ax3.set_title("Hardness")
ax4.set_title("Density")
ax1.set_ylabel("Depth (cm)")
ax1.grid()
ax4.grid()
metadata = "Date: " + self.date + '\n' + \
"Observer: " + self.Observer + '\n' + \
"Glacier: " + self.glacier + '\n' + \
"East : " + self.East + '\n' + \
"North: " + self.North + '\n' + \
"Elevation: " + self.Elevation + '\n' + \
"Weather Conditions: " + self.weather_conditions + '\n' + \
"Air temperature: " + self.AirTemp + '\n' + \
"Comments: " + self.comments + '\n'
left, width = .25, .5
bottom, height = .25, .5
right = left + width
top = bottom + height
plt.figtext(0.08, 0.15 , metadata,
horizontalalignment='left',
verticalalignment='center',wrap=True, fontsize=6)
#fig.autofmt_xdate()
plt.tight_layout()
plt.subplots_adjust(wspace=0)
        if save:
fig.savefig(self.filename.split('/')[-1][0:-4])
def plot_temperature(self):
'''
Plot temperature profile
TODO:
- reverse depth axis
'''
plt.figure()
plt.plot(self.temperature_snow, self.temperature_depth)
plt.gca().invert_yaxis()
plt.xlabel('Temperature (C)')
plt.ylabel('Depth (cm)')
plt.title('Temperature profile')
plt.grid()
def plot_density(self):
'''
Plot density profile
TODO:
- reverse depth axis
'''
plt.figure()
plt.plot(self.density, self.density_depth)
plt.xlabel('Density (kg/m3)')
plt.ylabel('Depth (cm)')
plt.title('Density profile')
plt.grid()
def load_csv(self):
self.load_metadata()
self.load_profile()
def load_profile(self):
self.profile_raw_table = pd.read_csv(self.filename, sep='\t', skiprows=14)
self.layerID = self.profile_raw_table['Layer']
self.layer_top = self.profile_raw_table['Top [cm]']
self.layer_bot = self.profile_raw_table['Bottom [cm]']
self.grain_type1 = self.profile_raw_table['Type 1']
self.grain_type2 = self.profile_raw_table['Type 2']
        self.grain_type3 = self.profile_raw_table['Type 3']
self.grain_size_min = self.profile_raw_table['Diameter min [mm]']
self.grain_size_max = self.profile_raw_table['Diameter max [mm]']
self.hardness = self.profile_raw_table['Hardness']
self.hardness_code = self.profile_raw_table['Hardness code']
self.density_depth = self.profile_raw_table['Depth Center [cm]']
self.density = self.profile_raw_table['Snow Density [g/cm3]']
self.temperature_depth = self.profile_raw_table['Depth [cm]']
self.temperature_snow = self.profile_raw_table['Temp [deg C]']
self.depth_sample = self.profile_raw_table['Depth Center [cm].1']
self.name_sample = self.profile_raw_table['ID_sample']
def load_metadata(self):
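        # Parse the tab-separated header of the pit file, keying on the first
        # four characters of each line to fill in the metadata attributes.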
f = open(self.filename)
try:
for i, line in enumerate(f):
if line[0:4] == 'Date':
self.date = line.split("\t")[1]
self.East = line.split("\t")[6]
if line[0:4] == 'Time':
self.Elevation = line.split("\t")[5]
if line[0:4] == 'Area':
self.North = line.split("\t")[7]
if line[0:4] == 'Obse':
self.Observer = line.split("\t")[1]
if line[0:4] == 'Air ':
self.AirTemp = line.split("\t")[2]
if line[0:4] == 'Glac':
self.glacier = line.split("\t")[1]
if line[0:4] == 'Glac':
self.weather_conditions = line.split("\t")[4]
if line[0:4] == 'Gene':
self.comments = line.split("\t")[1]
except ValueError:
            print "Could not load metadata. Check file formatting"
f.close()
def print_metadata(self):
print "Date: " + self.date
print "East [deg]: " + self.East
print "North [deg]: " + self.North
print "Elevation [m]: " + self.Elevation
print "Observer: " + self.Observer
print "Air temperature [C]: " + self.AirTemp
print "Glacier: " + self.glacier
print "Weather conditions: " + self.weather_conditions
print "Comments: " + self.comments
# Include script in this if statement
if __name__ == '__main__':
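    # Minimal usage sketch (not part of the original script); the file name is
    # hypothetical and should point to a JC-formatted snowpit file.
    # pit = Snowpit_svalbard_JC('my_pit.txt')
    # pit.load_csv()
    # pit.print_metadata()
    # pit.summary_plot(save=True)
    pass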
|
mit
|
taedla01/MissionPlanner
|
Lib/site-packages/numpy/lib/polynomial.py
|
58
|
35930
|
"""
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = x * y + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always',RankWarning)
|
gpl-3.0
|
riordan/professorblastoff
|
pytry.py
|
1
|
1675
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 10
img1 = cv2.imread('box.png',0) # queryImage
img2 = cv2.imread('scene.png',0) # trainImage
# Initiate SIFT detector
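# Note: cv2.SIFT() assumes OpenCV 2.4.x; on OpenCV 3.x with the contrib
# modules the equivalent constructor is cv2.xfeatures2d.SIFT_create().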
sift = cv2.SIFT()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
if len(good)>MIN_MATCH_COUNT:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
else:
    print "Not enough matches found - %d/%d" % (len(good),MIN_MATCH_COUNT)
matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
singlePointColor = None,
matchesMask = matchesMask, # draw only inliers
flags = 2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
plt.imshow(img3, 'gray'),plt.show()
|
apache-2.0
|
mikelum/pyspeckit
|
docs/conf.py
|
1
|
12943
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
# Get configuration information from setup.cfg
from distutils import config
conf = config.ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# -*- coding: utf-8 -*-
try:
import numpy
except ImportError:
print "Failed to import numpy"
#try:
# import numpydoc
#except ImportError:
# print "Failed to import numpydoc"
try:
import numpy
print "Succeeded in mocking"
except ImportError:
print "Failed to mock numpy"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, rootpath)
#import numpydoc
#sys.path.insert(0, os.path.split(numpydoc.__file__)[0])
sys.path.insert(0, rootpath+"/docs/sphinxext/")
sys.path.append(os.path.abspath('sphinxext'))
sys.path.append(os.path.abspath('.'))
print "rootpath: ",rootpath
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
#sys.path.insert(0, os.path.abspath('.'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest', 'sphinx.ext.autodoc', 'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'numpydoc', 'flickr', 'edit_on_github',
'edit_on_bitbucket']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
html_sidebars = {'**':['globaltoc.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']}
# General information about the project.
project = u'pyspeckit'
copyright = u'2011, Adam Ginsburg and Jordan Mirocha'
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# read the docs mocks
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
MOCK_MODULES = {'matplotlib', 'matplotlib.pyplot', 'matplotlib.figure',
'matplotlib.widgets', 'matplotlib.cbook', 'pyfits', 'scipy',
'astropy',
'numpy', 'scipy', 'pyfits', 'astropy', 'pytest', 'astropy.wcs',
'astropy.io', 'astropy.io.fits', 'astropy.nddata',
'scipy.interpolate', 'scipy.ndimage', 'pywcs', 'matplotlib',
'matplotlib.pyplot', 'numpy.ma', 'h5py', 'atpy','progressbar'}
for mod_name in MOCK_MODULES:
if mod_name not in sys.modules:
sys.modules[mod_name] = Mock()
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
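# Note that the second loop replaces every listed module unconditionally,
# overriding the guarded loop above, so the real packages are mocked even
# when they are importable.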
# The short X.Y version.
#import pyspeckit
#version = pyspeckit.__version__
## The full version, including alpha/beta/rc tags.
#release = pyspeckit.__version__
#
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build','_static','_template']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'agogo'
html_style = 'extra.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = dict(
pagewidth = '1000px',
documentwidth = '760px',
sidebarwidth = '200px',
nosidebar=False,
headerbg="#666666",
headercolor1="#000000",
headercolor2="#000000",
headerlinkcolor="#FF9522",
linkcolor="#4a8f43",
textalign='left',
)
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "images/logo.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static','_static/extra.css','_static/scipy.css','_static/astropy.css']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyspeckitdoc'
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyspeckit.tex', u'pyspeckit Documentation',
u'Adam Ginsburg and Jordan Mirocha', 'manual'),
]
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# Try to make autoclass include both __init__ and Class docstrings
autoclass_content = 'both'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
## -- Options for the edit_on_github extension ----------------------------------------
if eval(setup_cfg.get('edit_on_github')):
extensions += ['edit_on_github']
versionmod = __import__(setup_cfg['package_name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.version.release:
edit_on_github_branch = "v" + versionmod.version.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
edit_on_bitbucket_project = "pyspeckit/pyspeckit"
edit_on_bitbucket_source_root = ""
edit_on_bitbucket_doc_root = "doc"
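# Editor's note (hedged): the edit_on_github block above pulls its settings from
# the package's setup.cfg via setup_cfg.get(...). A configuration along the
# following lines would satisfy the keys this file reads; the exact section they
# live in depends on the astropy-helpers template in use:
#
#     edit_on_github = True
#     github_project = pyspeckit/pyspeckit
#     package_name = pyspeckit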
|
mit
|
kashif/scikit-learn
|
examples/linear_model/plot_sgd_separating_hyperplane.py
|
84
|
1221
|
"""
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machine classifier
trained with SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
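# Editor's note: a hedged sketch, not part of the original example, showing a
# vectorized alternative to the np.ndenumerate loop above. It reuses the clf,
# X1 and X2 objects defined earlier in this script.
grid = np.c_[X1.ravel(), X2.ravel()]                # stack the meshgrid into an (n_points, 2) array
Z_vectorized = clf.decision_function(grid).reshape(X1.shape)
# Z_vectorized should agree with the loop-built Z above up to floating point,
# i.e. np.allclose(Z, Z_vectorized) is expected to be True.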
|
bsd-3-clause
|
dwdii/stockyPuck
|
src/zL_monteCarlo.py
|
1
|
4416
|
__author__ = 'Daniel Dittenhafer'
__date__ = 'Oct 25, 2015'
__version__ = 1.0
# Based in part on: https://github.com/dwdii/IS602-AdvProgTech/blob/master/Lesson12/hw12_dittenhafer.ipynb
from zipline.api import order_target, record, symbol, history, add_history
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_bars_from_yahoo
import matplotlib.pyplot as plt
import pandas as pd
import pytz
from datetime import datetime
import kellyCriterion
class MonteCarloTradingAlgorithm(TradingAlgorithm):
def initialize(self):
self.i = 0
self.kelly = kellyCriterion.KellyCriterion()
self.mcHistoryDays = 10
self.mcIterations = 100
# The number of days in the future to simulate
self.mcFutureDays = 1
self.add_history(self.mcHistoryDays, '1d', 'price')
# Need to manually specify the analyze in this mode of execution.
# It would come for free if using the run_algo.py CLI.
#self._analyze = self.analyze
def monteCarloIteration(self, mean, std, start):
import random
sample = list()
for i in range(0, self.mcFutureDays, 1):
sample.append(random.gauss(mean, std))
curPrice = start
walk = list()
for d in sample:
newPrice = curPrice + d
curPrice = newPrice
walk.append(curPrice)
return walk[-1]
def _handle_data(self, context, data):
"""
        Overload of the _handle_data method. It must be _handle_data (with a leading underscore), not handle_data,
        in order to take advantage of the base class's history container auto-updates, which the history call below relies on.
        :param context: The TradingAlgorithm base class passes in an extra self, so we refer to it as context here.
        :param data: The current bar data supplied by zipline.
        :return:
"""
# Skip first X days to get full windows
self.i += 1
if self.i < self.mcHistoryDays:
return
# What day are we currently processing?
#print(self.datetime)
sym = symbol(eqSymbol)
# Compute averages
# history() has to be called with the same params
# from above and returns a pandas dataframe.
histData = self.history(self.mcHistoryDays, '1d', 'price')
curPrice = histData[sym][-1]
priceDiffs = histData[sym].diff()
meanDiff = priceDiffs.mean()
sdDiff = priceDiffs.std()
mcResults = list()
for i in range(0, self.mcIterations, 1):
res = self.monteCarloIteration(meanDiff, sdDiff, curPrice)
mcResults.append(res)
# Convert to a pandas series so we can use the statistics functions.
mcResultsPd = pd.Series(mcResults)
# What is the price we predict for tomorrow?
# Using some summary statistic of the individual Monte Carlo iteration results.
predictedPrice = mcResultsPd.mean()
wagerFrac = self.kelly.WagerFraction(priceDiffs, curPrice, predictedPrice)
shares = (self.portfolio.cash * wagerFrac) / curPrice
# this function auto balances our cash/stock mix based on a fractional amount we input.
# anything outside the range of [-1.0, 1.0] will utilize financial leverage
        self.order_target_percent(sym, wagerFrac)
# Save values for later inspection
self.record(eqSymbol, data[sym].price,
'mc_price', predictedPrice)
print(context.portfolio.portfolio_value)
def analyze(context, perf):
fig = plt.figure()
ax1 = fig.add_subplot(211)
perf.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('portfolio value in $')
ax2 = fig.add_subplot(212)
perf[eqSymbol].plot(ax=ax2)
perf[['mc_price']].plot(ax=ax2)
ax2.set_ylabel('price in $')
plt.legend(loc=0)
plt.show()
if __name__ == "__main__":
# Load data manually from Yahoo! finance
eqSymbol = 'YHOO'
start = datetime(2010, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_bars_from_yahoo(stocks=[eqSymbol], start=start,
end=end)
# Create algorithm object
algo_obj = MonteCarloTradingAlgorithm()
# Run algorithm
perf_manual = algo_obj.run(data)
#print(perf_manual)
#print(perf_manual.ending_value[-1])
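def _mc_forecast_sketch(prices, iterations=100, future_days=1, seed=None):
    """Editor's sketch, not part of the original strategy: a zipline-free
    illustration of the Monte Carlo forecast performed in _handle_data above.
    Daily price changes are drawn from a Gaussian fitted to the history's
    mean/std, the walk is repeated `iterations` times, and the mean simulated
    end price is returned. The function and parameter names are hypothetical.
    """
    import numpy as np
    if seed is not None:
        np.random.seed(seed)
    prices = np.asarray(prices, dtype=float)
    diffs = np.diff(prices)                      # day-over-day price changes
    mean, std = diffs.mean(), diffs.std()
    # simulate all random walks at once: one row per Monte Carlo iteration
    steps = np.random.normal(mean, std, size=(iterations, future_days))
    end_prices = prices[-1] + steps.sum(axis=1)  # final price of each walk
    return end_prices.mean()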
|
mit
|
kaichogami/scikit-learn
|
examples/cluster/plot_affinity_propagation.py
|
349
|
2304
|
"""
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
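# Editor's note: a hedged addition, not part of the original example. Because
# AffinityPropagation keeps its exemplars in cluster_centers_, the fitted model
# can also assign previously unseen points to the nearest exemplar via predict():
import numpy as np
new_points = np.array([[0.9, 0.9], [-1.1, -0.8]])
print("Predicted clusters for %s: %s" % (new_points.tolist(), af.predict(new_points)))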
|
bsd-3-clause
|
jjhelmus/wradlib
|
examples/typical_workflow.py
|
1
|
6623
|
# -------------------------------------------------------------------------------
# Name: A typical workflow for radar-based rainfall estimation
# Purpose:
#
# Author: heistermann
#
# Created: 26.10.2012
# Copyright: (c) heistermann 2012
# Licence: MIT
# -------------------------------------------------------------------------------
#!/usr/bin/env python
import wradlib
import numpy as np
import matplotlib.pyplot as pl
#pl.interactive(True)
import os
def ex_typical_workflow():
# read the data
data, metadata = wradlib.io.readDX(os.path.join(os.path.dirname(__file__), "data/sample.dx"))
fig = pl.figure()
ax = pl.subplot(111)
ax, pm = wradlib.vis.plot_ppi(data, ax=ax)
cmap = pl.colorbar(pm, shrink=0.75)
# identify and visualise clutters
clutter = wradlib.clutter.filter_gabella(data, tr1=12, n_p=6, tr2=1.1)
fig = pl.figure()
ax = pl.subplot(111)
ax, pm = wradlib.vis.plot_ppi(clutter, ax=ax, cmap=pl.cm.gray)
pl.title('Clutter Map')
cbar = pl.colorbar(pm, shrink=0.75)
# Remove and fill clutter
data_no_clutter = wradlib.ipol.interpolate_polar(data, clutter)
# Attenuation correction according to Kraemer
pia = wradlib.atten.correctAttenuationKraemer(data_no_clutter)
data_attcorr = data_no_clutter + pia
# compare reflectivity with and without attenuation correction for one beam
fig = pl.figure()
ax = pl.subplot(111)
pl.plot(data_attcorr[240], label="attcorr")
pl.plot(data_no_clutter[240], label="no attcorr")
pl.xlabel("km")
pl.ylabel("dBZ")
pl.legend()
# pl.savefig("_test_ppi_attcorr.png")
# converting to rainfall intensity
R = wradlib.zr.z2r(wradlib.trafo.idecibel(data_attcorr))
# and then to rainfall depth over 5 minutes
depth = wradlib.trafo.r2depth(R, 300)
# example for rainfall accumulation in case we have a series of sweeps (here: random numbers)
sweep_times = wradlib.util.from_to("2012-10-26 00:00:00", "2012-10-26 02:00:00", 300)
depths_5min = np.random.uniform(size=(len(sweep_times) - 1, 360, 128))
hours = wradlib.util.from_to("2012-10-26 00:00:00", "2012-10-26 02:00:00", 3600)
depths_hourly = wradlib.util.aggregate_in_time(depths_5min, sweep_times, hours, func='sum')
# Georeferencing
radar_location = (8.005, 47.8744, 1517) # (lon, lat, alt) in decimal degree and meters
elevation = 0.5 # in degree
azimuths = np.arange(0, 360) # in degrees
ranges = np.arange(0, 128000., 1000.) # in meters
polargrid = np.meshgrid(ranges, azimuths)
lon, lat, alt = wradlib.georef.polar2lonlatalt_n(polargrid[0], polargrid[1], elevation, radar_location)
# projection to Gauss Krueger zone 3
proj_gk3 = wradlib.georef.epsg_to_osr(31467)
x, y = wradlib.georef.reproject(lon, lat, projection_target=proj_gk3)
xy = np.vstack((x.ravel(), y.ravel())).transpose()
# transfer the north-east sector to a 1kmx1km grid
xgrid = np.linspace(x.min(), x.mean(), 100)
ygrid = np.linspace(y.min(), y.mean(), 100)
grid_xy = np.meshgrid(xgrid, ygrid)
grid_xy = np.vstack((grid_xy[0].ravel(), grid_xy[1].ravel())).transpose()
gridded = wradlib.comp.togrid(xy, grid_xy, 128000., np.array([x.mean(), y.mean()]), data.ravel(), wradlib.ipol.Idw)
gridded = np.ma.masked_invalid(gridded).reshape((len(xgrid), len(ygrid)))
fig = pl.figure(figsize=(10, 8))
ax = pl.subplot(111, aspect="equal")
pm = pl.pcolormesh(xgrid, ygrid, gridded)
pl.colorbar(pm, shrink=0.75)
pl.xlabel("Easting (m)")
pl.ylabel("Northing (m)")
# Adjustment example
radar_coords = np.arange(0, 101)
truth = np.abs(1.5 + np.sin(0.075 * radar_coords)) + np.random.uniform(-0.1, 0.1, len(radar_coords))
# The radar rainfall estimate ``radar`` is then computed by
# imprinting a multiplicative ``error`` on ``truth`` and adding some noise.
error = 0.75 + 0.015 * radar_coords
radar = error * truth + np.random.uniform(-0.1, 0.1, len(radar_coords))
# Synthetic gage observations ``obs`` are then created by selecting arbitrary "true" values.
obs_coords = np.array([5, 10, 15, 20, 30, 45, 65, 70, 77, 90])
obs = truth[obs_coords]
# Now we adjust the ``radar`` rainfall estimate by using the gage observations.
# First, you create an "adjustment object" from the approach you
# want to use for adjustment. After that, you can call the object with the actual data that is to be adjusted.
    # Here, we use a multiplicative error model with spatially heterogeneous error
# (see :doc:`wradlib.adjust.AdjustMultiply`).
adjuster = wradlib.adjust.AdjustMultiply(obs_coords, radar_coords, nnear_raws=3)
adjusted = adjuster(obs, radar)
# Let's compare the ``truth``, the ``radar`` rainfall estimate and the ``adjusted`` product:
fig = pl.figure()
ax = pl.subplot(111)
pl.plot(radar_coords, truth, 'k-', label="True rainfall", linewidth=2.)
pl.xlabel("Distance (km)")
pl.ylabel("Rainfall intensity (mm/h)")
pl.plot(radar_coords, radar, 'k-', label="Raw radar rainfall", linewidth=2., linestyle="dashed")
pl.plot(obs_coords, obs, 'o', label="Gage observation", markersize=10.0, markerfacecolor="grey")
pl.plot(radar_coords, adjusted, '-', color="green", label="Multiplicative adjustment", linewidth=2.)
pl.legend(prop={'size': 12})
# Verification
raw_error = wradlib.verify.ErrorMetrics(truth, radar)
adj_error = wradlib.verify.ErrorMetrics(truth, adjusted)
raw_error.report()
adj_error.report()
# Export
# Export your data array as a text file:
np.savetxt("mydata.txt", data)
# Or as a gzip-compressed text file:
np.savetxt("mydata.gz", data)
# Or as a NetCDF file:
import netCDF4
rootgrp = netCDF4.Dataset('test.nc', 'w', format='NETCDF4')
sweep_xy = rootgrp.createGroup('sweep_xy')
dim_azimuth = sweep_xy.createDimension('azimuth', None)
dim_range = sweep_xy.createDimension('range', None)
azimuths_var = sweep_xy.createVariable('azimuths', 'i4', ('azimuth',))
ranges_var = sweep_xy.createVariable('ranges', 'f4', ('range',))
dBZ_var = sweep_xy.createVariable('dBZ', 'f4', ('azimuth', 'range',))
azimuths_var[:] = np.arange(0, 360)
ranges_var[:] = np.arange(0, 128000., 1000.)
dBZ_var[:] = data
    rootgrp.bandwidth = "C-Band"
sweep_xy.datetime = "2012-11-02 10:15:00"
rootgrp.close()
if __name__ == '__main__':
ex_typical_workflow()
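def _zr_sketch(dbz, a=200., b=1.6):
    """Editor's sketch, not taken from the wradlib API: spells out with plain
    numpy the two steps that wradlib.trafo.idecibel and wradlib.zr.z2r perform
    above. a=200, b=1.6 are the conventional Marshall-Palmer coefficients and
    are assumed here; the example itself relies on wradlib's own defaults.
    """
    import numpy as np
    Z = 10. ** (np.asarray(dbz, dtype=float) / 10.)  # idecibel: dBZ -> Z in mm^6/m^3
    return (Z / a) ** (1. / b)                       # invert Z = a * R**b -> rain rate R in mm/h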
|
mit
|
PatrickOReilly/scikit-learn
|
examples/mixture/plot_gmm_covariances.py
|
89
|
4724
|
"""
===============
GMM covariances
===============
Demonstration of several covariance types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMMs are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((cov_type, GaussianMixture(n_components=n_classes,
covariance_type=cov_type, max_iter=20, random_state=0))
for cov_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
for i in range(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))
plt.show()
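# Editor's note: a hedged addition, not part of the original example. The text
# above argues that full covariance is the most flexible but the most prone to
# overfitting; BIC makes that trade-off explicit by penalizing the number of
# free parameters of each (already fitted) estimator:
for name, estimator in estimators.items():
    print("BIC on training data (%s): %.1f" % (name, estimator.bic(X_train)))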
|
bsd-3-clause
|
rudimeier/numpy
|
doc/source/conf.py
|
63
|
9811
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.0.1":
raise RuntimeError("Sphinx 1.0.1 or newer required")
needs_sphinx = '1.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.autosummary',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# General substitutions.
project = 'NumPy'
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print("%s %s" % (version, release))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if not os.path.isdir(themedir):
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init && git submodule update")
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_sidebars = {'index': 'indexsidebar.html'}
html_additional_pages = {
'index': 'indexcontent.html',
}
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [
("contents", 'numpy', 'Numpy Documentation', _stdauthor, 'Numpy',
"NumPy: array processing for numbers, strings, records, and objects.",
'Programming',
1),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {'http://docs.python.org/dev': None}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
plot_rcparams = {
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(numpy.__file__))
if 'dev' in numpy.__version__:
return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
else:
return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
|
bsd-3-clause
|
roxyboy/scikit-learn
|
examples/hetero_feature_union.py
|
288
|
6236
|
"""
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to the samples).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
    ItemSelector is not designed to handle data grouped by sample (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(test.target, y))
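# Editor's note: a hedged, standalone illustration (not part of the original
# example) of the ItemSelector / TextStats building blocks used above, run on
# two toy posts so the intermediate outputs are easy to inspect:
toy_posts = ["Subject: hello\n\nA short body.",
             "Subject: stats\n\nTwo sentences. Right here."]
toy_features = SubjectBodyExtractor().transform(toy_posts)
print(ItemSelector(key='subject').transform(toy_features))
print(TextStats().transform(ItemSelector(key='body').transform(toy_features)))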
|
bsd-3-clause
|
BryanCutler/spark
|
python/pyspark/pandas/tests/test_indexing.py
|
1
|
51797
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from distutils.version import LooseVersion
import unittest
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.exceptions import SparkPandasIndexingError
from pyspark.pandas.testing.utils import ComparisonTestBase, ReusedSQLTestCase, compare_both
class BasicIndexingTest(ComparisonTestBase):
@property
def pdf(self):
return pd.DataFrame(
{"month": [1, 4, 7, 10], "year": [2012, 2014, 2013, 2014], "sale": [55, 40, 84, 31]}
)
@compare_both(almost=False)
def test_indexing(self, df):
df1 = df.set_index("month")
yield df1
yield df.set_index("month", drop=False)
yield df.set_index("month", append=True)
yield df.set_index(["year", "month"])
yield df.set_index(["year", "month"], drop=False)
yield df.set_index(["year", "month"], append=True)
yield df1.set_index("year", drop=False, append=True)
df2 = df1.copy()
df2.set_index("year", append=True, inplace=True)
yield df2
self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index("unknown"))
self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index(["month", "unknown"]))
for d in [df, df1, df2]:
yield d.reset_index()
yield d.reset_index(drop=True)
yield df1.reset_index(level=0)
yield df2.reset_index(level=1)
yield df2.reset_index(level=[1, 0])
yield df1.reset_index(level="month")
yield df2.reset_index(level="year")
yield df2.reset_index(level=["month", "year"])
yield df2.reset_index(level="month", drop=True)
yield df2.reset_index(level=["month", "year"], drop=True)
self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 1 level, not 3",
lambda: df1.reset_index(level=2),
)
self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 1 level, not 4",
lambda: df1.reset_index(level=[3, 2]),
)
self.assertRaisesRegex(KeyError, "unknown.*month", lambda: df1.reset_index(level="unknown"))
self.assertRaisesRegex(
KeyError, "Level unknown not found", lambda: df2.reset_index(level="unknown")
)
df3 = df2.copy()
df3.reset_index(inplace=True)
yield df3
yield df1.sale.reset_index()
yield df1.sale.reset_index(level=0)
yield df2.sale.reset_index(level=[1, 0])
yield df1.sale.reset_index(drop=True)
yield df1.sale.reset_index(name="s")
yield df1.sale.reset_index(name="s", drop=True)
s = df1.sale
self.assertRaisesRegex(
TypeError,
"Cannot reset_index inplace on a Series to create a DataFrame",
lambda: s.reset_index(inplace=True),
)
s.reset_index(drop=True, inplace=True)
yield s
yield df1
# multi-index columns
df4 = df.copy()
df4.columns = pd.MultiIndex.from_tuples(
[("cal", "month"), ("cal", "year"), ("num", "sale")]
)
df5 = df4.set_index(("cal", "month"))
yield df5
yield df4.set_index([("cal", "month"), ("num", "sale")])
self.assertRaises(KeyError, lambda: df5.reset_index(level=("cal", "month")))
yield df5.reset_index(level=[("cal", "month")])
# non-string names
df6 = df.copy()
df6.columns = [10.0, 20.0, 30.0]
df7 = df6.set_index(10.0)
yield df7
yield df6.set_index([10.0, 30.0])
yield df7.reset_index(level=10.0)
yield df7.reset_index(level=[10.0])
df8 = df.copy()
df8.columns = pd.MultiIndex.from_tuples([(10, "month"), (10, "year"), (20, "sale")])
df9 = df8.set_index((10, "month"))
yield df9
yield df8.set_index([(10, "month"), (20, "sale")])
yield df9.reset_index(level=[(10, "month")])
def test_from_pandas_with_explicit_index(self):
pdf = self.pdf
df1 = ps.from_pandas(pdf.set_index("month"))
self.assertPandasEqual(df1.to_pandas(), pdf.set_index("month"))
df2 = ps.from_pandas(pdf.set_index(["year", "month"]))
self.assertPandasEqual(df2.to_pandas(), pdf.set_index(["year", "month"]))
def test_limitations(self):
df = self.kdf.set_index("month")
self.assertRaisesRegex(
ValueError,
"Level should be all int or all string.",
lambda: df.reset_index([1, "month"]),
)
class IndexingTest(ReusedSQLTestCase):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ps.from_pandas(self.pdf)
@property
def pdf2(self):
return pd.DataFrame(
{0: [1, 2, 3, 4, 5, 6, 7, 8, 9], 1: [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf2(self):
return ps.from_pandas(self.pdf2)
def test_at(self):
pdf = self.pdf
kdf = self.kdf
# Create the equivalent of pdf.loc[3] as a Koalas Series
# This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
test_series = ps.Series([3, 6], index=["a", "b"], name="3")
        # Assert invalid signatures raise TypeError
with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
kdf.at[3]
with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
kdf.at["ab"] # 'ab' is of length 2 but str type instead of tuple
with self.assertRaises(TypeError, msg="Use Series.at like .at[column_name]"):
test_series.at[3, "b"]
# Assert .at for DataFrames
self.assertEqual(kdf.at[3, "b"], 6)
self.assertEqual(kdf.at[3, "b"], pdf.at[3, "b"])
self.assert_eq(kdf.at[9, "b"], np.array([0, 0, 0]))
self.assert_eq(kdf.at[9, "b"], pdf.at[9, "b"])
# Assert .at for Series
self.assertEqual(test_series.at["b"], 6)
self.assertEqual(test_series.at["b"], pdf.loc[3].at["b"])
# Assert multi-character indices
self.assertEqual(
ps.Series([0, 1], index=["ab", "cd"]).at["ab"],
pd.Series([0, 1], index=["ab", "cd"]).at["ab"],
)
# Assert invalid column or index names result in a KeyError like with pandas
with self.assertRaises(KeyError, msg="x"):
kdf.at[3, "x"]
with self.assertRaises(KeyError, msg=99):
kdf.at[99, "b"]
with self.assertRaises(ValueError):
kdf.at[(3, 6), "b"]
with self.assertRaises(KeyError):
kdf.at[3, ("x", "b")]
# Assert setting values fails
with self.assertRaises(TypeError):
kdf.at[3, "b"] = 10
# non-string column names
pdf = self.pdf2
kdf = self.kdf2
# Assert .at for DataFrames
self.assertEqual(kdf.at[3, 1], 6)
self.assertEqual(kdf.at[3, 1], pdf.at[3, 1])
self.assert_eq(kdf.at[9, 1], np.array([0, 0, 0]))
self.assert_eq(kdf.at[9, 1], pdf.at[9, 1])
def test_at_multiindex(self):
pdf = self.pdf.set_index("b", append=True)
kdf = self.kdf.set_index("b", append=True)
# TODO: seems like a pandas' bug in pandas>=1.1.0
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
self.assert_eq(kdf.at[(3, 6), "a"], pdf.at[(3, 6), "a"])
self.assert_eq(kdf.at[(3,), "a"], pdf.at[(3,), "a"])
self.assert_eq(list(kdf.at[(9, 0), "a"]), list(pdf.at[(9, 0), "a"]))
self.assert_eq(list(kdf.at[(9,), "a"]), list(pdf.at[(9,), "a"]))
else:
self.assert_eq(kdf.at[(3, 6), "a"], 3)
self.assert_eq(kdf.at[(3,), "a"], np.array([3]))
self.assert_eq(list(kdf.at[(9, 0), "a"]), [7, 8, 9])
self.assert_eq(list(kdf.at[(9,), "a"]), [7, 8, 9])
with self.assertRaises(ValueError):
kdf.at[3, "a"]
def test_at_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.at["B", ("bar", "one")], pdf.at["B", ("bar", "one")])
with self.assertRaises(KeyError):
kdf.at["B", "bar"]
# non-string column names
arrays = [np.array([0, 0, 1, 1]), np.array([1, 2, 1, 2])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.at["B", (0, 1)], pdf.at["B", (0, 1)])
def test_iat(self):
pdf = self.pdf
kdf = self.kdf
# Create the equivalent of pdf.loc[3] as a Koalas Series
# This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
test_series = ps.Series([3, 6], index=["a", "b"], name="3")
        # Assert invalid signatures raise TypeError
with self.assertRaises(
TypeError,
msg="Use DataFrame.at like .iat[row_interget_position, column_integer_position]",
):
kdf.iat[3]
with self.assertRaises(
ValueError, msg="iAt based indexing on multi-index can only have tuple values"
):
kdf.iat[3, "b"] # 'ab' is of length 2 but str type instead of tuple
with self.assertRaises(TypeError, msg="Use Series.iat like .iat[row_integer_position]"):
test_series.iat[3, "b"]
# Assert .iat for DataFrames
self.assertEqual(kdf.iat[7, 0], 8)
self.assertEqual(kdf.iat[7, 0], pdf.iat[7, 0])
# Assert .iat for Series
self.assertEqual(test_series.iat[1], 6)
self.assertEqual(test_series.iat[1], pdf.loc[3].iat[1])
# Assert invalid column or integer position result in a KeyError like with pandas
with self.assertRaises(KeyError, msg=99):
kdf.iat[0, 99]
with self.assertRaises(KeyError, msg=99):
kdf.iat[99, 0]
with self.assertRaises(ValueError):
kdf.iat[(1, 1), 1]
with self.assertRaises(ValueError):
kdf.iat[1, (1, 1)]
# Assert setting values fails
with self.assertRaises(TypeError):
kdf.iat[4, 1] = 10
def test_iat_multiindex(self):
pdf = self.pdf.set_index("b", append=True)
kdf = self.kdf.set_index("b", append=True)
self.assert_eq(kdf.iat[7, 0], pdf.iat[7, 0])
with self.assertRaises(ValueError):
kdf.iat[3, "a"]
def test_iat_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.iat[1, 3], pdf.iat[1, 3])
with self.assertRaises(KeyError):
kdf.iat[0, 99]
with self.assertRaises(KeyError):
kdf.iat[99, 0]
def test_loc(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[3:8], pdf.loc[3:8])
self.assert_eq(kdf.loc[:8], pdf.loc[:8])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[[5]], pdf.loc[[5]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 8]], pdf.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 9]], pdf.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.loc[np.array([3, 4, 1, 9])], pdf.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[5:5], pdf.a.loc[5:5])
self.assert_eq(kdf.a.loc[3:8], pdf.a.loc[3:8])
self.assert_eq(kdf.a.loc[:8], pdf.a.loc[:8])
self.assert_eq(kdf.a.loc[3:], pdf.a.loc[3:])
self.assert_eq(kdf.a.loc[[5]], pdf.a.loc[[5]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 8]], pdf.a.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 9]], pdf.a.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.a.loc[np.array([3, 4, 1, 9])],
# pdf.a.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[[]], pdf.a.loc[[]])
self.assert_eq(kdf.a.loc[np.array([])], pdf.a.loc[np.array([])])
self.assert_eq(kdf.loc[1000:], pdf.loc[1000:])
self.assert_eq(kdf.loc[-2000:-1000], pdf.loc[-2000:-1000])
self.assert_eq(kdf.loc[5], pdf.loc[5])
self.assert_eq(kdf.loc[9], pdf.loc[9])
self.assert_eq(kdf.a.loc[5], pdf.a.loc[5])
self.assert_eq(kdf.a.loc[9], pdf.a.loc[9])
self.assertRaises(KeyError, lambda: kdf.loc[10])
self.assertRaises(KeyError, lambda: kdf.a.loc[10])
# monotonically increasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[0, 1, 1, 2, 2, 2, 4, 5, 6])
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc[:2], pdf.loc[:2])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[4:], pdf.loc[4:])
self.assert_eq(kdf.loc[3:2], pdf.loc[3:2])
self.assert_eq(kdf.loc[-1:2], pdf.loc[-1:2])
self.assert_eq(kdf.loc[3:10], pdf.loc[3:10])
# monotonically decreasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[6, 5, 5, 4, 4, 4, 2, 1, 0])
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc[:4], pdf.loc[:4])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[2:], pdf.loc[2:])
self.assert_eq(kdf.loc[2:3], pdf.loc[2:3])
self.assert_eq(kdf.loc[2:-1], pdf.loc[2:-1])
self.assert_eq(kdf.loc[10:3], pdf.loc[10:3])
# test when type of key is string and given value is not included in key
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=["a", "b", "d"])
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc["a":"z"], pdf.loc["a":"z"])
# KeyError when index is not monotonic increasing or decreasing
# and specified values don't exist in index
kdf = ps.DataFrame([[1, 2], [4, 5], [7, 8]], index=["cobra", "viper", "sidewinder"])
self.assertRaises(KeyError, lambda: kdf.loc["cobra":"koalas"])
self.assertRaises(KeyError, lambda: kdf.loc["koalas":"viper"])
kdf = ps.DataFrame([[1, 2], [4, 5], [7, 8]], index=[10, 30, 20])
self.assertRaises(KeyError, lambda: kdf.loc[0:30])
self.assertRaises(KeyError, lambda: kdf.loc[10:100])
def test_loc_non_informative_index(self):
pdf = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 30, 40])
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc[20:30], pdf.loc[20:30])
pdf = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 20, 40])
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc[20:20], pdf.loc[20:20])
def test_loc_with_series(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[kdf.a % 2 == 0], pdf.loc[pdf.a % 2 == 0])
self.assert_eq(kdf.loc[kdf.a % 2 == 0, "a"], pdf.loc[pdf.a % 2 == 0, "a"])
self.assert_eq(kdf.loc[kdf.a % 2 == 0, ["a"]], pdf.loc[pdf.a % 2 == 0, ["a"]])
self.assert_eq(kdf.a.loc[kdf.a % 2 == 0], pdf.a.loc[pdf.a % 2 == 0])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0], pdf.loc[pdf.copy().a % 2 == 0])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0, "a"], pdf.loc[pdf.copy().a % 2 == 0, "a"])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0, ["a"]], pdf.loc[pdf.copy().a % 2 == 0, ["a"]])
self.assert_eq(kdf.a.loc[kdf.copy().a % 2 == 0], pdf.a.loc[pdf.copy().a % 2 == 0])
def test_loc_noindex(self):
kdf = self.kdf
kdf = kdf.reset_index()
pdf = self.pdf
pdf = pdf.reset_index()
self.assert_eq(kdf[["a"]], pdf[["a"]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
def test_loc_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index("b", append=True)
pdf = self.pdf
pdf = pdf.set_index("b", append=True)
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[5:9], pdf.loc[5:9])
self.assert_eq(kdf.loc[5], pdf.loc[5])
self.assert_eq(kdf.loc[9], pdf.loc[9])
# TODO: self.assert_eq(kdf.loc[(5, 3)], pdf.loc[(5, 3)])
# TODO: self.assert_eq(kdf.loc[(9, 0)], pdf.loc[(9, 0)])
self.assert_eq(kdf.a.loc[5], pdf.a.loc[5])
self.assert_eq(kdf.a.loc[9], pdf.a.loc[9])
self.assertTrue((kdf.a.loc[(5, 3)] == pdf.a.loc[(5, 3)]).all())
self.assert_eq(kdf.a.loc[(9, 0)], pdf.a.loc[(9, 0)])
# monotonically increasing index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("y", "d"), ("z", "e")]
),
)
kdf = ps.from_pandas(pdf)
for rows_sel in [
slice(None),
slice("y", None),
slice(None, "y"),
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically increasing", rows_sel=rows_sel):
self.assert_eq(kdf.loc[rows_sel], pdf.loc[rows_sel])
self.assert_eq(kdf.a.loc[rows_sel], pdf.a.loc[rows_sel])
# monotonically increasing first index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("y", "a"), ("z", "e")]
),
)
kdf = ps.from_pandas(pdf)
for rows_sel in [
slice(None),
slice("y", None),
slice(None, "y"),
]:
with self.subTest("monotonically increasing first index", rows_sel=rows_sel):
self.assert_eq(kdf.loc[rows_sel], pdf.loc[rows_sel])
self.assert_eq(kdf.a.loc[rows_sel], pdf.a.loc[rows_sel])
for rows_sel in [
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically increasing first index", rows_sel=rows_sel):
self.assertRaises(KeyError, lambda: kdf.loc[rows_sel])
self.assertRaises(KeyError, lambda: kdf.a.loc[rows_sel])
# not monotonically increasing index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("z", "e"), ("y", "d"), ("y", "c"), ("x", "b"), ("x", "a")]
),
)
kdf = ps.from_pandas(pdf)
for rows_sel in [
slice("y", None),
slice(None, "y"),
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically decreasing", rows_sel=rows_sel):
self.assertRaises(KeyError, lambda: kdf.loc[rows_sel])
self.assertRaises(KeyError, lambda: kdf.a.loc[rows_sel])
def test_loc2d_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index("b", append=True)
pdf = self.pdf
pdf = pdf.set_index("b", append=True)
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[:, "a"], pdf.loc[:, "a"])
self.assert_eq(kdf.loc[5:5, "a"], pdf.loc[5:5, "a"])
self.assert_eq(kdf.loc[:, "a":"a"], pdf.loc[:, "a":"a"])
self.assert_eq(kdf.loc[:, "a":"c"], pdf.loc[:, "a":"c"])
self.assert_eq(kdf.loc[:, "b":"c"], pdf.loc[:, "b":"c"])
def test_loc2d(self):
kdf = self.kdf
pdf = self.pdf
# index indexer is always regarded as slice for duplicated values
self.assert_eq(kdf.loc[5:5, "a"], pdf.loc[5:5, "a"])
self.assert_eq(kdf.loc[[5], "a"], pdf.loc[[5], "a"])
self.assert_eq(kdf.loc[5:5, ["a"]], pdf.loc[5:5, ["a"]])
self.assert_eq(kdf.loc[[5], ["a"]], pdf.loc[[5], ["a"]])
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[3:8, "a"], pdf.loc[3:8, "a"])
self.assert_eq(kdf.loc[:8, "a"], pdf.loc[:8, "a"])
self.assert_eq(kdf.loc[3:, "a"], pdf.loc[3:, "a"])
self.assert_eq(kdf.loc[[8], "a"], pdf.loc[[8], "a"])
self.assert_eq(kdf.loc[3:8, ["a"]], pdf.loc[3:8, ["a"]])
self.assert_eq(kdf.loc[:8, ["a"]], pdf.loc[:8, ["a"]])
self.assert_eq(kdf.loc[3:, ["a"]], pdf.loc[3:, ["a"]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 3], ['a']], pdf.loc[[3, 4, 3], ['a']])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.loc[3, 3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3:, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[kdf.a % 2 == 0, 3])
self.assert_eq(kdf.loc[5, "a"], pdf.loc[5, "a"])
self.assert_eq(kdf.loc[9, "a"], pdf.loc[9, "a"])
self.assert_eq(kdf.loc[5, ["a"]], pdf.loc[5, ["a"]])
self.assert_eq(kdf.loc[9, ["a"]], pdf.loc[9, ["a"]])
self.assert_eq(kdf.loc[:, "a":"a"], pdf.loc[:, "a":"a"])
self.assert_eq(kdf.loc[:, "a":"d"], pdf.loc[:, "a":"d"])
self.assert_eq(kdf.loc[:, "c":"d"], pdf.loc[:, "c":"d"])
# bool list-like column select
bool_list = [True, False]
self.assert_eq(kdf.loc[:, bool_list], pdf.loc[:, bool_list])
self.assert_eq(kdf.loc[:, np.array(bool_list)], pdf.loc[:, np.array(bool_list)])
pser = pd.Series(bool_list, index=pdf.columns)
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
pser = pd.Series(list(reversed(bool_list)), index=list(reversed(pdf.columns)))
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
self.assertRaises(IndexError, lambda: kdf.loc[:, bool_list[:-1]])
self.assertRaises(IndexError, lambda: kdf.loc[:, np.array(bool_list + [True])])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.loc[:, pd.Series(bool_list)])
# non-string column names
kdf = self.kdf2
pdf = self.pdf2
self.assert_eq(kdf.loc[5:5, 0], pdf.loc[5:5, 0])
self.assert_eq(kdf.loc[5:5, [0]], pdf.loc[5:5, [0]])
self.assert_eq(kdf.loc[3:8, 0], pdf.loc[3:8, 0])
self.assert_eq(kdf.loc[3:8, [0]], pdf.loc[3:8, [0]])
self.assert_eq(kdf.loc[:, 0:0], pdf.loc[:, 0:0])
self.assert_eq(kdf.loc[:, 0:3], pdf.loc[:, 0:3])
self.assert_eq(kdf.loc[:, 2:3], pdf.loc[:, 2:3])
def test_loc2d_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc["B":"B", "bar"], pdf.loc["B":"B", "bar"])
self.assert_eq(kdf.loc["B":"B", ["bar"]], pdf.loc["B":"B", ["bar"]])
self.assert_eq(kdf.loc[:, "bar":"bar"], pdf.loc[:, "bar":"bar"])
self.assert_eq(kdf.loc[:, "bar":("baz", "one")], pdf.loc[:, "bar":("baz", "one")])
self.assert_eq(
kdf.loc[:, ("bar", "two"):("baz", "one")], pdf.loc[:, ("bar", "two"):("baz", "one")]
)
self.assert_eq(kdf.loc[:, ("bar", "two"):"bar"], pdf.loc[:, ("bar", "two"):"bar"])
self.assert_eq(kdf.loc[:, "a":"bax"], pdf.loc[:, "a":"bax"])
self.assert_eq(
kdf.loc[:, ("bar", "x"):("baz", "a")],
pdf.loc[:, ("bar", "x"):("baz", "a")],
almost=True,
)
pdf = pd.DataFrame(
np.random.randn(3, 4),
index=["A", "B", "C"],
columns=pd.MultiIndex.from_tuples(
[("bar", "two"), ("bar", "one"), ("baz", "one"), ("baz", "two")]
),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc[:, "bar":"baz"], pdf.loc[:, "bar":"baz"])
self.assertRaises(KeyError, lambda: kdf.loc[:, "bar":("baz", "one")])
self.assertRaises(KeyError, lambda: kdf.loc[:, ("bar", "two"):"bar"])
# bool list-like column select
bool_list = [True, False, True, False]
self.assert_eq(kdf.loc[:, bool_list], pdf.loc[:, bool_list])
self.assert_eq(kdf.loc[:, np.array(bool_list)], pdf.loc[:, np.array(bool_list)])
pser = pd.Series(bool_list, index=pdf.columns)
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
pser = pd.Series(list(reversed(bool_list)), index=list(reversed(pdf.columns)))
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
# non-string column names
arrays = [np.array([0, 0, 1, 1]), np.array([1, 2, 1, 2])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc["B":"B", 0], pdf.loc["B":"B", 0])
self.assert_eq(kdf.loc["B":"B", [0]], pdf.loc["B":"B", [0]])
self.assert_eq(kdf.loc[:, 0:0], pdf.loc[:, 0:0])
self.assert_eq(kdf.loc[:, 0:(1, 1)], pdf.loc[:, 0:(1, 1)])
self.assert_eq(kdf.loc[:, (0, 2):(1, 1)], pdf.loc[:, (0, 2):(1, 1)])
self.assert_eq(kdf.loc[:, (0, 2):0], pdf.loc[:, (0, 2):0])
self.assert_eq(kdf.loc[:, -1:2], pdf.loc[:, -1:2])
def test_loc2d_with_known_divisions(self):
pdf = pd.DataFrame(
np.random.randn(20, 5), index=list("abcdefghijklmnopqrst"), columns=list("ABCDE")
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc[["a"], "A"], pdf.loc[["a"], "A"])
self.assert_eq(kdf.loc[["a"], ["A"]], pdf.loc[["a"], ["A"]])
self.assert_eq(kdf.loc["a":"o", "A"], pdf.loc["a":"o", "A"])
self.assert_eq(kdf.loc["a":"o", ["A"]], pdf.loc["a":"o", ["A"]])
self.assert_eq(kdf.loc[["n"], ["A"]], pdf.loc[["n"], ["A"]])
self.assert_eq(kdf.loc[["a", "c", "n"], ["A"]], pdf.loc[["a", "c", "n"], ["A"]])
# TODO?: self.assert_eq(kdf.loc[['t', 'b'], ['A']], pdf.loc[['t', 'b'], ['A']])
# TODO?: self.assert_eq(kdf.loc[['r', 'r', 'c', 'g', 'h'], ['A']],
# TODO?: pdf.loc[['r', 'r', 'c', 'g', 'h'], ['A']])
@unittest.skip("TODO: should handle duplicated columns properly")
def test_loc2d_duplicated_columns(self):
pdf = pd.DataFrame(
np.random.randn(20, 5), index=list("abcdefghijklmnopqrst"), columns=list("AABCD")
)
kdf = ps.from_pandas(pdf)
# TODO?: self.assert_eq(kdf.loc[['a'], 'A'], pdf.loc[['a'], 'A'])
# TODO?: self.assert_eq(kdf.loc[['a'], ['A']], pdf.loc[['a'], ['A']])
self.assert_eq(kdf.loc[["j"], "B"], pdf.loc[["j"], "B"])
self.assert_eq(kdf.loc[["j"], ["B"]], pdf.loc[["j"], ["B"]])
# TODO?: self.assert_eq(kdf.loc['a':'o', 'A'], pdf.loc['a':'o', 'A'])
# TODO?: self.assert_eq(kdf.loc['a':'o', ['A']], pdf.loc['a':'o', ['A']])
self.assert_eq(kdf.loc["j":"q", "B"], pdf.loc["j":"q", "B"])
self.assert_eq(kdf.loc["j":"q", ["B"]], pdf.loc["j":"q", ["B"]])
# TODO?: self.assert_eq(kdf.loc['a':'o', 'B':'D'], pdf.loc['a':'o', 'B':'D'])
# TODO?: self.assert_eq(kdf.loc['a':'o', 'B':'D'], pdf.loc['a':'o', 'B':'D'])
# TODO?: self.assert_eq(kdf.loc['j':'q', 'B':'A'], pdf.loc['j':'q', 'B':'A'])
# TODO?: self.assert_eq(kdf.loc['j':'q', 'B':'A'], pdf.loc['j':'q', 'B':'A'])
self.assert_eq(kdf.loc[kdf.B > 0, "B"], pdf.loc[pdf.B > 0, "B"])
# TODO?: self.assert_eq(kdf.loc[kdf.B > 0, ['A', 'C']], pdf.loc[pdf.B > 0, ['A', 'C']])
def test_getitem(self):
pdf = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
columns=list("ABC"),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf["A"], pdf["A"])
self.assert_eq(kdf[["A", "B"]], pdf[["A", "B"]])
self.assert_eq(kdf[kdf.C], pdf[pdf.C])
self.assertRaises(KeyError, lambda: kdf["X"])
self.assertRaises(KeyError, lambda: kdf[["A", "X"]])
self.assertRaises(AttributeError, lambda: kdf.X)
# not str/unicode
# TODO?: pdf = pd.DataFrame(np.random.randn(10, 5))
# TODO?: kdf = ps.from_pandas(pdf)
# TODO?: self.assert_eq(kdf[0], pdf[0])
# TODO?: self.assert_eq(kdf[[1, 2]], pdf[[1, 2]])
# TODO?: self.assertRaises(KeyError, lambda: pdf[8])
# TODO?: self.assertRaises(KeyError, lambda: pdf[[1, 8]])
# non-string column names
pdf = pd.DataFrame(
{
10: [1, 2, 3, 4, 5, 6, 7, 8, 9],
20: [9, 8, 7, 6, 5, 4, 3, 2, 1],
30: [True, False, True] * 3,
}
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf[10], pdf[10])
self.assert_eq(kdf[[10, 20]], pdf[[10, 20]])
def test_getitem_slice(self):
pdf = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
index=list("abcdefghi"),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf["a":"e"], pdf["a":"e"])
self.assert_eq(kdf["a":"b"], pdf["a":"b"])
self.assert_eq(kdf["f":], pdf["f":])
def test_loc_on_numpy_datetimes(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(np.datetime64, ["2014", "2015", "2016"]))
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc["2014":"2015"], pdf.loc["2014":"2015"])
def test_loc_on_pandas_datetimes(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(pd.Timestamp, ["2014", "2015", "2016"]))
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.loc["2014":"2015"], pdf.loc["2014":"2015"])
@unittest.skip("TODO?: the behavior of slice for datetime")
def test_loc_datetime_no_freq(self):
datetime_index = pd.date_range("2016-01-01", "2016-01-31", freq="12h")
datetime_index.freq = None # FORGET FREQUENCY
pdf = pd.DataFrame({"num": range(len(datetime_index))}, index=datetime_index)
kdf = ps.from_pandas(pdf)
slice_ = slice("2016-01-03", "2016-01-05")
result = kdf.loc[slice_, :]
expected = pdf.loc[slice_, :]
self.assert_eq(result, expected)
@unittest.skip("TODO?: the behavior of slice for datetime")
def test_loc_timestamp_str(self):
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
kdf = ps.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf.loc['2011-01-02'],
# TODO?: kdf.loc['2011-01-02'])
self.assert_eq(pdf.loc["2011-01-02":"2011-01-05"], kdf.loc["2011-01-02":"2011-01-05"])
# series
# TODO?: self.assert_eq(pdf.A.loc['2011-01-02'],
# TODO?: kdf.A.loc['2011-01-02'])
self.assert_eq(pdf.A.loc["2011-01-02":"2011-01-05"], kdf.A.loc["2011-01-02":"2011-01-05"])
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="M", periods=100),
)
kdf = ps.from_pandas(pdf)
# TODO?: self.assert_eq(pdf.loc['2011-01'], kdf.loc['2011-01'])
# TODO?: self.assert_eq(pdf.loc['2011'], kdf.loc['2011'])
self.assert_eq(pdf.loc["2011-01":"2012-05"], kdf.loc["2011-01":"2012-05"])
self.assert_eq(pdf.loc["2011":"2015"], kdf.loc["2011":"2015"])
# series
# TODO?: self.assert_eq(pdf.B.loc['2011-01'], kdf.B.loc['2011-01'])
# TODO?: self.assert_eq(pdf.B.loc['2011'], kdf.B.loc['2011'])
self.assert_eq(pdf.B.loc["2011-01":"2012-05"], kdf.B.loc["2011-01":"2012-05"])
self.assert_eq(pdf.B.loc["2011":"2015"], kdf.B.loc["2011":"2015"])
@unittest.skip("TODO?: the behavior of slice for datetime")
def test_getitem_timestamp_str(self):
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
kdf = ps.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf['2011-01-02'],
# TODO?: kdf['2011-01-02'])
self.assert_eq(pdf["2011-01-02":"2011-01-05"], kdf["2011-01-02":"2011-01-05"])
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="M", periods=100),
)
kdf = ps.from_pandas(pdf)
# TODO?: self.assert_eq(pdf['2011-01'], kdf['2011-01'])
# TODO?: self.assert_eq(pdf['2011'], kdf['2011'])
self.assert_eq(pdf["2011-01":"2012-05"], kdf["2011-01":"2012-05"])
self.assert_eq(pdf["2011":"2015"], kdf["2011":"2015"])
@unittest.skip("TODO?: period index can't convert to DataFrame correctly")
def test_getitem_period_str(self):
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="H", periods=100),
)
kdf = ps.from_pandas(pdf)
# partial string slice
# TODO?: self.assert_eq(pdf['2011-01-02'],
# TODO?: kdf['2011-01-02'])
self.assert_eq(pdf["2011-01-02":"2011-01-05"], kdf["2011-01-02":"2011-01-05"])
pdf = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="M", periods=100),
)
kdf = ps.from_pandas(pdf)
# TODO?: self.assert_eq(pdf['2011-01'], kdf['2011-01'])
# TODO?: self.assert_eq(pdf['2011'], kdf['2011'])
self.assert_eq(pdf["2011-01":"2012-05"], kdf["2011-01":"2012-05"])
self.assert_eq(pdf["2011":"2015"], kdf["2011":"2015"])
def test_iloc(self):
pdf = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.iloc[0, 0], pdf.iloc[0, 0])
for indexer in [0, [0], [0, 1], [1, 0], [False, True, True], slice(0, 1)]:
self.assert_eq(kdf.iloc[:, indexer], pdf.iloc[:, indexer])
self.assert_eq(kdf.iloc[:1, indexer], pdf.iloc[:1, indexer])
self.assert_eq(kdf.iloc[:-1, indexer], pdf.iloc[:-1, indexer])
# self.assert_eq(kdf.iloc[kdf.index == 2, indexer], pdf.iloc[pdf.index == 2, indexer])
def test_iloc_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ps.from_pandas(pdf)
for indexer in [0, [0], [0, 1], [1, 0], [False, True, True, True], slice(0, 1)]:
self.assert_eq(kdf.iloc[:, indexer], pdf.iloc[:, indexer])
self.assert_eq(kdf.iloc[:1, indexer], pdf.iloc[:1, indexer])
self.assert_eq(kdf.iloc[:-1, indexer], pdf.iloc[:-1, indexer])
# self.assert_eq(kdf.iloc[kdf.index == "B", indexer],
# pdf.iloc[pdf.index == "B", indexer])
def test_iloc_series(self):
pser = pd.Series([1, 2, 3])
kser = ps.from_pandas(pser)
self.assert_eq(kser.iloc[0], pser.iloc[0])
self.assert_eq(kser.iloc[:], pser.iloc[:])
self.assert_eq(kser.iloc[:1], pser.iloc[:1])
self.assert_eq(kser.iloc[:-1], pser.iloc[:-1])
self.assert_eq((kser + 1).iloc[0], (pser + 1).iloc[0])
self.assert_eq((kser + 1).iloc[:], (pser + 1).iloc[:])
self.assert_eq((kser + 1).iloc[:1], (pser + 1).iloc[:1])
self.assert_eq((kser + 1).iloc[:-1], (pser + 1).iloc[:-1])
def test_iloc_slice_rows_sel(self):
pdf = pd.DataFrame({"A": [1, 2] * 5, "B": [3, 4] * 5, "C": [5, 6] * 5})
kdf = ps.from_pandas(pdf)
for rows_sel in [
slice(None),
slice(0, 1),
slice(1, 2),
slice(-3, None),
slice(None, -3),
slice(None, 0),
slice(None, None, 3),
slice(3, 8, 2),
slice(None, None, -2),
slice(8, 3, -2),
slice(8, None, -2),
slice(None, 3, -2),
]:
with self.subTest(rows_sel=rows_sel):
self.assert_eq(kdf.iloc[rows_sel].sort_index(), pdf.iloc[rows_sel].sort_index())
self.assert_eq(kdf.A.iloc[rows_sel].sort_index(), pdf.A.iloc[rows_sel].sort_index())
self.assert_eq(
(kdf.A + 1).iloc[rows_sel].sort_index(), (pdf.A + 1).iloc[rows_sel].sort_index()
)
def test_iloc_iterable_rows_sel(self):
pdf = pd.DataFrame({"A": [1, 2] * 5, "B": [3, 4] * 5, "C": [5, 6] * 5})
kdf = ps.from_pandas(pdf)
for rows_sel in [
[],
np.array([0, 1]),
[1, 2],
np.array([-3]),
[3],
np.array([-2]),
[8, 3, -5],
]:
with self.subTest(rows_sel=rows_sel):
self.assert_eq(kdf.iloc[rows_sel].sort_index(), pdf.iloc[rows_sel].sort_index())
self.assert_eq(kdf.A.iloc[rows_sel].sort_index(), pdf.A.iloc[rows_sel].sort_index())
self.assert_eq(
(kdf.A + 1).iloc[rows_sel].sort_index(), (pdf.A + 1).iloc[rows_sel].sort_index()
)
with self.subTest(rows_sel=rows_sel):
self.assert_eq(
kdf.iloc[rows_sel, :].sort_index(), pdf.iloc[rows_sel, :].sort_index()
)
with self.subTest(rows_sel=rows_sel):
self.assert_eq(
kdf.iloc[rows_sel, :1].sort_index(), pdf.iloc[rows_sel, :1].sort_index()
)
def test_frame_loc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf = ps.from_pandas(pdf)
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
pdf.loc[["viper", "sidewinder"], ["shield", "max_speed"]] = 10
kdf.loc[["viper", "sidewinder"], ["shield", "max_speed"]] = 10
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf.loc[["viper", "sidewinder"], "shield"] = 50
kdf.loc[["viper", "sidewinder"], "shield"] = 50
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf.loc["cobra", "max_speed"] = 30
kdf.loc["cobra", "max_speed"] = 30
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf.loc[pdf.max_speed < 5, "max_speed"] = -pdf.max_speed
kdf.loc[kdf.max_speed < 5, "max_speed"] = -kdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf.loc[pdf.max_speed < 2, "max_speed"] = -pdf.max_speed
kdf.loc[kdf.max_speed < 2, "max_speed"] = -kdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf.loc[:, "min_speed"] = 0
kdf.loc[:, "min_speed"] = 0
self.assert_eq(kdf, pdf, almost=True)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
with self.assertRaisesRegex(ValueError, "Incompatible indexer with Series"):
kdf.loc["cobra", "max_speed"] = -kdf.max_speed
with self.assertRaisesRegex(ValueError, "shape mismatch"):
kdf.loc[:, ["shield", "max_speed"]] = -kdf.max_speed
with self.assertRaisesRegex(ValueError, "Only a dataframe with one column can be assigned"):
kdf.loc[:, "max_speed"] = kdf
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "max_speed"), ("x", "shield"), ("y", "min_speed")]
)
pdf.columns = columns
kdf.columns = columns
pdf.loc[:, ("y", "shield")] = -pdf[("x", "shield")]
kdf.loc[:, ("y", "shield")] = -kdf[("x", "shield")]
self.assert_eq(kdf, pdf, almost=True)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf.loc[:, "z"] = 100
kdf.loc[:, "z"] = 100
self.assert_eq(kdf, pdf, almost=True)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
with self.assertRaisesRegex(KeyError, "Key length \\(3\\) exceeds index depth \\(2\\)"):
kdf.loc[:, [("x", "max_speed", "foo")]] = -kdf[("x", "shield")]
pdf = pd.DataFrame(
[[1], [4], [7]], index=["cobra", "viper", "sidewinder"], columns=["max_speed"]
)
kdf = ps.from_pandas(pdf)
pdf.loc[:, "max_speed"] = pdf
kdf.loc[:, "max_speed"] = kdf
self.assert_eq(kdf, pdf)
def test_frame_iloc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf = ps.from_pandas(pdf)
pdf.iloc[[1, 2], [1, 0]] = 10
kdf.iloc[[1, 2], [1, 0]] = 10
self.assert_eq(kdf, pdf)
pdf.iloc[0, 1] = 50
kdf.iloc[0, 1] = 50
self.assert_eq(kdf, pdf)
with self.assertRaisesRegex(ValueError, "setting an array element with a sequence."):
kdf.iloc[0, 0] = -kdf.max_speed
with self.assertRaisesRegex(ValueError, "shape mismatch"):
kdf.iloc[:, [1, 0]] = -kdf.max_speed
with self.assertRaisesRegex(ValueError, "Only a dataframe with one column can be assigned"):
kdf.iloc[:, 0] = kdf
pdf = pd.DataFrame(
[[1], [4], [7]], index=["cobra", "viper", "sidewinder"], columns=["max_speed"]
)
kdf = ps.from_pandas(pdf)
pdf.iloc[:, 0] = pdf
kdf.iloc[:, 0] = kdf
self.assert_eq(kdf, pdf)
def test_series_loc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
pser.loc[pser % 2 == 1] = -pser
kser.loc[kser % 2 == 1] = -kser
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
for key, value in [
(["viper", "sidewinder"], 10),
("viper", 50),
(slice(None), 10),
(slice(None, "viper"), 20),
(slice("viper", None), 30),
]:
with self.subTest(key=key, value=value):
pser.loc[key] = value
kser.loc[key] = value
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
with self.assertRaises(ValueError):
kser.loc["viper"] = -kser
# multiindex
pser = pd.Series(
[1, 2, 3],
index=pd.MultiIndex.from_tuples([("x", "cobra"), ("x", "viper"), ("y", "sidewinder")]),
)
kser = ps.from_pandas(pser)
pser.loc["x"] = pser * 10
kser.loc["x"] = kser * 10
self.assert_eq(kser, pser)
pser.loc["y"] = pser * 10
kser.loc["y"] = kser * 10
self.assert_eq(kser, pser)
if LooseVersion(pd.__version__) < LooseVersion("1.0"):
# TODO: seems like a pandas' bug in pandas>=1.0.0?
pser.loc[("x", "viper"):"y"] = pser * 20
kser.loc[("x", "viper"):"y"] = kser * 20
self.assert_eq(kser, pser)
def test_series_iloc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
piloc = pser.iloc
kiloc = kser.iloc
pser1 = pser + 1
kser1 = kser + 1
for key, value in [
([1, 2], 10),
(1, 50),
(slice(None), 10),
(slice(None, 1), 20),
(slice(1, None), 30),
]:
with self.subTest(key=key, value=value):
pser.iloc[key] = value
kser.iloc[key] = value
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
piloc[key] = -value
kiloc[key] = -value
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pser1.iloc[key] = value
kser1.iloc[key] = value
self.assert_eq(kser1, pser1)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
with self.assertRaises(ValueError):
kser.iloc[1] = -kser
pser = pd.Index([1, 2, 3]).to_series()
kser = ps.Index([1, 2, 3]).to_series()
pser1 = pser + 1
kser1 = kser + 1
pser.iloc[0] = 10
kser.iloc[0] = 10
self.assert_eq(kser, pser)
pser1.iloc[0] = 20
kser1.iloc[0] = 20
self.assert_eq(kser1, pser1)
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
kdf = ps.from_pandas(pdf)
pser = pdf.a
kser = kdf.a
pser.iloc[[0, 1, 2]] = -pdf.b
kser.iloc[[0, 1, 2]] = -kdf.b
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
with self.assertRaisesRegex(ValueError, "setting an array element with a sequence."):
kser.iloc[1] = kdf[["b"]]
def test_iloc_raises(self):
pdf = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
kdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(SparkPandasIndexingError, "Only accepts pairs of candidates"):
kdf.iloc[[0, 1], [0, 1], [1, 2]]
with self.assertRaisesRegex(SparkPandasIndexingError, "Too many indexers"):
kdf.A.iloc[[0, 1], [0, 1]]
with self.assertRaisesRegex(TypeError, "cannot do slice indexing with these indexers"):
kdf.iloc[:"b", :]
with self.assertRaisesRegex(TypeError, "cannot do slice indexing with these indexers"):
kdf.iloc[:, :"b"]
with self.assertRaisesRegex(TypeError, "cannot perform reduce with flexible type"):
kdf.iloc[:, ["A"]]
with self.assertRaisesRegex(ValueError, "Location based indexing can only have"):
kdf.iloc[:, "A"]
with self.assertRaisesRegex(IndexError, "out of range"):
kdf.iloc[:, [5, 6]]
def test_index_operator_datetime(self):
dates = pd.date_range("20130101", periods=6)
pdf = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list("ABCD"))
kdf = ps.from_pandas(pdf)
# Positional iloc search
self.assert_eq(kdf[:4], pdf[:4], almost=True)
self.assert_eq(kdf[:3], pdf[:3], almost=True)
self.assert_eq(kdf[3:], pdf[3:], almost=True)
self.assert_eq(kdf[2:], pdf[2:], almost=True)
self.assert_eq(kdf[2:3], pdf[2:3], almost=True)
self.assert_eq(kdf[2:-1], pdf[2:-1], almost=True)
self.assert_eq(kdf[10:3], pdf[10:3], almost=True)
# Index loc search
self.assert_eq(kdf.A[4], pdf.A[4])
self.assert_eq(kdf.A[3], pdf.A[3])
# Positional iloc search
self.assert_eq(kdf.A[:4], pdf.A[:4], almost=True)
self.assert_eq(kdf.A[:3], pdf.A[:3], almost=True)
self.assert_eq(kdf.A[3:], pdf.A[3:], almost=True)
self.assert_eq(kdf.A[2:], pdf.A[2:], almost=True)
self.assert_eq(kdf.A[2:3], pdf.A[2:3], almost=True)
self.assert_eq(kdf.A[2:-1], pdf.A[2:-1], almost=True)
self.assert_eq(kdf.A[10:3], pdf.A[10:3], almost=True)
dt1 = datetime.datetime.strptime("2013-01-02", "%Y-%m-%d")
dt2 = datetime.datetime.strptime("2013-01-04", "%Y-%m-%d")
# Index loc search
self.assert_eq(kdf[:dt2], pdf[:dt2], almost=True)
self.assert_eq(kdf[dt1:], pdf[dt1:], almost=True)
self.assert_eq(kdf[dt1:dt2], pdf[dt1:dt2], almost=True)
self.assert_eq(kdf.A[dt2], pdf.A[dt2], almost=True)
self.assert_eq(kdf.A[:dt2], pdf.A[:dt2], almost=True)
self.assert_eq(kdf.A[dt1:], pdf.A[dt1:], almost=True)
self.assert_eq(kdf.A[dt1:dt2], pdf.A[dt1:dt2], almost=True)
def test_index_operator_int(self):
pdf = pd.DataFrame(np.random.randn(6, 4), index=[1, 3, 5, 7, 9, 11], columns=list("ABCD"))
kdf = ps.from_pandas(pdf)
# Positional iloc search
self.assert_eq(kdf[:4], pdf[:4])
self.assert_eq(kdf[:3], pdf[:3])
self.assert_eq(kdf[3:], pdf[3:])
self.assert_eq(kdf[2:], pdf[2:])
self.assert_eq(kdf[2:3], pdf[2:3])
self.assert_eq(kdf[2:-1], pdf[2:-1])
self.assert_eq(kdf[10:3], pdf[10:3])
# Index loc search
self.assert_eq(kdf.A[5], pdf.A[5])
self.assert_eq(kdf.A[3], pdf.A[3])
with self.assertRaisesRegex(
NotImplementedError, "Duplicated row selection is not currently supported"
):
kdf.iloc[[1, 1]]
if __name__ == "__main__":
from pyspark.pandas.tests.test_indexing import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
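# A single test method from this module can be run on its own with unittest's dotted-path
# selection, e.g. (hypothetical invocation; substitute the actual TestCase class name, which
# is defined earlier in this file):
#     python -m unittest pyspark.pandas.tests.test_indexing.<TestCaseClass>.test_iloc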
|
apache-2.0
|
davidr/tensorflow-playground
|
projects/tfrecords/record_writer.py
|
1
|
3142
|
#!/usr/bin/env python2.7
import numpy as np
import sklearn.datasets as datasets
import tensorflow as tf
import argparse
def _float64_feature(feature):
return tf.train.Feature(float_list=tf.train.FloatList(value=[feature]))
def _feature_dict_from_row(row):
"""Take row of length n+1 from 2-D ndarray and convert it to a dictionary:
{
'f0': row[0],
'f1': row[1],
...
'fn': row[n]
}
"""
features = {}
# TODO(davidr): switch to a dict comprehension when you're sure this does what
# you think it does
for i in range(len(row)):
features["f{:d}".format(i)] = _float64_feature(row[i])
return features
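# A dict-comprehension equivalent of the loop above (illustrative sketch only, kept commented
# out pending the TODO; 'row' is the same argument that _feature_dict_from_row receives):
#     features = {"f{:d}".format(i): _float64_feature(value) for i, value in enumerate(row)}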
def gen_regression_dataset(n_samples, n_features):
# We'll say 50% of the features are informative
n_informative = int(n_features * .5)
X, y, coef = datasets.make_regression(n_samples=n_samples,
n_features=n_features,
n_informative=n_informative,
n_targets=1,
noise=0.1,
bias=np.pi,
coef=True,
random_state=31337 # For teh reproducibilities
# (and lulz)
)
# Don't need the true coefficients yet
return X, y
def write_regression_data_to_tfrecord(X, y, filename):
with tf.python_io.TFRecordWriter('{:s}.tfrecords'.format(filename)) as tfwriter:
for row_index in range(X.shape[0]):
# For 5000x100, this takes about 180s. That seems long, doesn't it?
# TODO(davidr): I think the delay is in _feature_dict_from_row()
# write_data_to_tfrecord(X, y)
features = _feature_dict_from_row(X[row_index])
# We only generated the feature_dict from the feature matrix. Tack on the label
features['label'] = _float64_feature(y[row_index])
example = tf.train.Example(features=tf.train.Features(feature=features))
tfwriter.write(example.SerializeToString())
def tf_recordtest(args):
X, y = gen_regression_dataset(n_samples=args.n_samples, n_features=args.n_features)
write_regression_data_to_tfrecord(X, y, args.filename)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="write some random tfrecords")
parser.add_argument('-s', '--samples', metavar='N', type=int, default=500, dest='n_samples',
help='Number of samples/observations')
parser.add_argument('-f', '--features', metavar='M', type=int, default=10, dest='n_features',
help='Number of features/targets')
parser.add_argument('-F', '--filename', metavar="FILE", dest='filename', required=True,
help='Filename for tfrecord output (will append .tfrecords)')
args = parser.parse_args()
tf_recordtest(args)
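# Illustrative read-back sketch (assumes the same TF 1.x API used above; 'out.tfrecords' is a
# hypothetical filename produced by write_regression_data_to_tfrecord):
#     for serialized in tf.python_io.tf_record_iterator('out.tfrecords'):
#         example = tf.train.Example()
#         example.ParseFromString(serialized)
#         print(example.features.feature['label'].float_list.value[0])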
|
mit
|
hsuantien/scikit-learn
|
benchmarks/bench_random_projections.py
|
397
|
8900
|
"""
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
gc.collect()
clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
# benchmark on the dense or sparse input selected via --dense
time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
|
bsd-3-clause
|
0asa/scikit-learn
|
examples/model_selection/randomized_search.py
|
57
|
3208
|
"""
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
iris = load_digits()
X, y = iris.data, iris.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
|
bsd-3-clause
|
hmendozap/auto-sklearn
|
autosklearn/pipeline/components/classification/qda.py
|
1
|
2620
|
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter
from autosklearn.pipeline.components.base import \
AutoSklearnClassificationAlgorithm
from autosklearn.pipeline.constants import *
from autosklearn.pipeline.implementations.util import softmax
import numpy as np
class QDA(AutoSklearnClassificationAlgorithm):
def __init__(self, reg_param, random_state=None):
self.reg_param = float(reg_param)
self.estimator = None
def fit(self, X, Y):
import sklearn.qda
import sklearn.multiclass  # needed for the OneVsRestClassifier branch below
estimator = sklearn.qda.QDA(self.reg_param)
if len(Y.shape) == 2 and Y.shape[1] > 1:
self.estimator = sklearn.multiclass.OneVsRestClassifier(estimator, n_jobs=1)
else:
self.estimator = estimator
self.estimator.fit(X, Y)
if len(Y.shape) == 2 and Y.shape[1] > 1:
problems = []
for est in self.estimator.estimators_:
problem = np.any(np.any([np.any(s <= 0.0) for s in
est.scalings_]))
problems.append(problem)
problem = np.any(problems)
else:
problem = np.any(np.any([np.any(s <= 0.0) for s in
self.estimator.scalings_]))
if problem:
raise ValueError('Numerical problems in QDA. QDA.scalings_ '
'contains values <= 0.0')
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
df = self.estimator.predict_proba(X)
return softmax(df)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'QDA',
'name': 'Quadratic Discriminant Analysis',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'input': (DENSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
reg_param = UniformFloatHyperparameter('reg_param', 0.0, 10.0,
default=0.5)
cs = ConfigurationSpace()
cs.add_hyperparameter(reg_param)
return cs
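# Illustrative usage sketch (X and y below are hypothetical training arrays; the reg_param
# value is arbitrary):
#     clf = QDA(reg_param=0.5)
#     clf.fit(X, y)
#     probabilities = clf.predict_proba(X)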
|
bsd-3-clause
|
3manuek/scikit-learn
|
sklearn/cluster/setup.py
|
263
|
1449
|
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
ProjectsUCSC/CMPS296
|
src/others/cornell_film_parse.py
|
1
|
9179
|
EN_WHITELIST = '0123456789abcdefghijklmnopqrstuvwxyz ' # space is included in whitelist
EN_BLACKLIST = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\''
limit = {
'maxq' : 25,
'minq' : 2,
'maxa' : 25,
'mina' : 2
}
UNK = 'unk'
VOCAB_SIZE = 8000
import random
import nltk
import itertools
from collections import defaultdict
import numpy as np
import pickle
'''
1. Read from 'movie-lines.txt'
2. Create a dictionary with ( key = line_id, value = text )
'''
def get_id2line():
lines=open('/Users/shubhi/Public/CMPS296/cornell_film/movie_lines.txt', encoding='utf-8', errors='ignore').read().split('\n')
id2line = {}
for line in lines:
_line = line.split(' +++$+++ ')
if len(_line) == 5:
id2line[_line[0]] = _line[4]
return id2line
'''
1. Read from 'movie_conversations.txt'
2. Create a list of [list of line_id's]
'''
def get_conversations():
conv_lines = open('/Users/shubhi/Public/CMPS296/cornell_film/movie_conversations.txt', encoding='utf-8', errors='ignore').read().split('\n')
convs = [ ]
for line in conv_lines[:-1]:
_line = line.split(' +++$+++ ')[-1][1:-1].replace("'","").replace(" ","")
convs.append(_line.split(','))
return convs
'''
1. Get each conversation
2. Get each line from conversation
3. Save each conversation to file
'''
def extract_conversations(convs,id2line,path=''):
idx = 0
for conv in convs:
f_conv = open(path + str(idx)+'.txt', 'w')
for line_id in conv:
f_conv.write(id2line[line_id])
f_conv.write('\n')
f_conv.close()
idx += 1
'''
Get lists of all conversations as Questions and Answers
1. [questions]
2. [answers]
'''
def gather_dataset(convs, id2line):
questions = []; answers = []
for conv in convs:
if len(conv) %2 != 0:
conv = conv[:-1]
for i in range(len(conv)):
if i%2 == 0:
questions.append(id2line[conv[i]])
else:
answers.append(id2line[conv[i]])
return questions, answers
'''
We need 4 files
1. train.enc : Encoder input for training
2. train.dec : Decoder input for training
3. test.enc : Encoder input for testing
4. test.dec : Decoder input for testing
'''
import pandas as pd
def prepare_seq2seq_files(questions, answers, path='',TESTSET_SIZE = 30000):
#df = pd.DataFrame(data=[questions.T, answers.T])
data = np.column_stack((questions, answers))
df =pd.DataFrame(columns = ['ques', 'ans'], data = data)
df.to_csv("cornell_data.csv")
return df
'''
remove anything that isn't in the vocabulary
return str(pure en)
'''
def filter_line(line, whitelist):
return ''.join([ ch for ch in line if ch in whitelist ])
'''
filter too long and too short sequences
return tuple( filtered_q, filtered_a )
'''
def filter_data(qseq, aseq):
filtered_q, filtered_a = [], []
raw_data_len = len(qseq)
assert len(qseq) == len(aseq)
for i in range(raw_data_len):
qlen, alen = len(qseq[i].split(' ')), len(aseq[i].split(' '))
if qlen >= limit['minq'] and qlen <= limit['maxq']:
if alen >= limit['mina'] and alen <= limit['maxa']:
filtered_q.append(qseq[i])
filtered_a.append(aseq[i])
# print the fraction of the original data, filtered
filt_data_len = len(filtered_q)
filtered = int((raw_data_len - filt_data_len)*100/raw_data_len)
print(str(filtered) + '% filtered from original data')
return filtered_q, filtered_a
'''
read list of words, create index to word,
word to index dictionaries
return tuple( vocab->(word, count), idx2w, w2idx )
'''
def index_(tokenized_sentences, vocab_size):
# get frequency distribution
freq_dist = nltk.FreqDist(itertools.chain(*tokenized_sentences))
# get vocabulary of 'vocab_size' most used words
vocab = freq_dist.most_common(vocab_size)
# index2word
index2word = ['_'] + [UNK] + [ x[0] for x in vocab ]
# word2index
word2index = dict([(w,i) for i,w in enumerate(index2word)] )
return index2word, word2index, freq_dist
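# Illustrative worked example (hypothetical input): index_([['hi', 'there'], ['hi']], vocab_size=2)
# would yield index2word == ['_', 'unk', 'hi', 'there'] and
# word2index == {'_': 0, 'unk': 1, 'hi': 2, 'there': 3}, with 'hi' ranked first because it
# occurs twice in the frequency distribution.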
'''
filter based on number of unknowns (words not in vocabulary)
filter out the worst sentences
'''
def filter_unk(qtokenized, atokenized, w2idx):
data_len = len(qtokenized)
filtered_q, filtered_a = [], []
for qline, aline in zip(qtokenized, atokenized):
unk_count_q = len([ w for w in qline if w not in w2idx ])
unk_count_a = len([ w for w in aline if w not in w2idx ])
if unk_count_a <= 2:
if unk_count_q > 0:
if unk_count_q/len(qline) > 0.2:
continue  # skip pairs where too many of the question words are unknown
filtered_q.append(qline)
filtered_a.append(aline)
# print the fraction of the original data, filtered
filt_data_len = len(filtered_q)
filtered = int((data_len - filt_data_len)*100/data_len)
print(str(filtered) + '% filtered from original data')
return filtered_q, filtered_a
'''
create the final dataset :
- convert list of items to arrays of indices
- add zero padding
return ( [array_en([indices]), array_ta([indices]) )
'''
def zero_pad(qtokenized, atokenized, w2idx):
# num of rows
data_len = len(qtokenized)
# numpy arrays to store indices
idx_q = np.zeros([data_len, limit['maxq']], dtype=np.int32)
idx_a = np.zeros([data_len, limit['maxa']], dtype=np.int32)
for i in range(data_len):
q_indices = pad_seq(qtokenized[i], w2idx, limit['maxq'])
a_indices = pad_seq(atokenized[i], w2idx, limit['maxa'])
#print(len(idx_q[i]), len(q_indices))
#print(len(idx_a[i]), len(a_indices))
idx_q[i] = np.array(q_indices)
idx_a[i] = np.array(a_indices)
return idx_q, idx_a
'''
replace words with indices in a sequence
replace with unknown if word not in lookup
return [list of indices]
'''
def pad_seq(seq, lookup, maxlen):
indices = []
for word in seq:
if word in lookup:
indices.append(lookup[word])
else:
indices.append(lookup[UNK])
return indices + [0]*(maxlen - len(seq))
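# Illustrative worked example (hypothetical lookup): with lookup = {'_': 0, 'unk': 1, 'hello': 2}
# and maxlen = 4, pad_seq(['hello', 'world'], lookup, 4) returns [2, 1, 0, 0]: 'world' falls
# back to the UNK index and the remainder is zero padding.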
def process_data():
id2line = get_id2line()
print('>> gathered id2line dictionary.\n')
convs = get_conversations()
print(convs[121:125])
print('>> gathered conversations.\n')
questions, answers = gather_dataset(convs,id2line)
# change to lower case (just for en)
questions = [ line.lower() for line in questions ]
answers = [ line.lower() for line in answers ]
# filter out unnecessary characters
print('\n>> Filter lines')
questions = [ filter_line(line, EN_WHITELIST) for line in questions ]
answers = [ filter_line(line, EN_WHITELIST) for line in answers ]
# filter out too long or too short sequences
print('\n>> 2nd layer of filtering')
qlines, alines = filter_data(questions, answers)
for q,a in zip(qlines[141:145], alines[141:145]):
print('q : [{0}]; a : [{1}]'.format(q,a))
# convert list of [lines of text] into list of [list of words ]
print('\n>> Segment lines into words')
qtokenized = [ [w.strip() for w in wordlist.split(' ') if w] for wordlist in qlines ]
atokenized = [ [w.strip() for w in wordlist.split(' ') if w] for wordlist in alines ]
print('\n:: Sample from segmented list of words')
for q,a in zip(qtokenized[141:145], atokenized[141:145]):
print('q : [{0}]; a : [{1}]'.format(q,a))
# indexing -> idx2w, w2idx
print('\n >> Index words')
idx2w, w2idx, freq_dist = index_( qtokenized + atokenized, vocab_size=VOCAB_SIZE)
# filter out sentences with too many unknowns
print('\n >> Filter Unknowns')
qtokenized, atokenized = filter_unk(qtokenized, atokenized, w2idx)
print('\n Final dataset len : ' + str(len(qtokenized)))
print('\n >> Zero Padding')
idx_q, idx_a = zero_pad(qtokenized, atokenized, w2idx)
print('\n >> Save numpy arrays to disk')
# save them
np.save('idx_q.npy', idx_q)
np.save('idx_a.npy', idx_a)
# let us now save the necessary dictionaries
metadata = {
'w2idx' : w2idx,
'idx2w' : idx2w,
'limit' : limit,
'freq_dist' : freq_dist
}
# write to disk : data control dictionaries
with open('metadata.pkl', 'wb') as f:
pickle.dump(metadata, f)
# count of unknowns
unk_count = (idx_q == 1).sum() + (idx_a == 1).sum()
# count of words
word_count = (idx_q > 1).sum() + (idx_a > 1).sum()
print('% unknown : {0}'.format(100 * (unk_count/word_count)))
print('Dataset count : ' + str(idx_q.shape[0]))
print ('>> gathered questions and answers.\n')
return prepare_seq2seq_files(questions,answers)
if __name__ == '__main__':
process_data()
def load_data(PATH=''):
# read data control dictionaries
with open(PATH + 'metadata.pkl', 'rb') as f:
metadata = pickle.load(f)
# read numpy arrays
idx_q = np.load(PATH + 'idx_q.npy')
idx_a = np.load(PATH + 'idx_a.npy')
return metadata, idx_q, idx_a
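# Illustrative round-trip sketch (assumes process_data() has already written its outputs to the
# current working directory):
#     metadata, idx_q, idx_a = load_data()
#     assert idx_q.shape[1] == metadata['limit']['maxq']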
|
mit
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sklearn/gaussian_process/tests/test_gaussian_process.py
|
5
|
3208
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# License: BSD style
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a one-dimensional Gaussian Process model.
Check random start optimization.
Test the interpolating property.
"""
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = f(X).ravel()
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a two-dimensional Gaussian Process model accounting for
anisotropy. Check random start optimization.
Test the interpolating property.
"""
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
"""
Repeat test_1d and test_2d for several built-in correlation
models specified as strings.
"""
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
"""
Repeat test_1d and test_2d with given regression weights (beta0) for
different regression models (Ordinary Kriging).
"""
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
|
agpl-3.0
|
edgarRd/incubator-airflow
|
airflow/contrib/hooks/salesforce_hook.py
|
10
|
12428
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Salesforce Hook
which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file
for other uses.
NOTE: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
import json
import pandas as pd
import time
from airflow.utils.log.logging_mixin import LoggingMixin
class SalesforceHook(BaseHook, LoggingMixin):
def __init__(
self,
conn_id,
*args,
**kwargs
):
"""
Creates a new connection to Salesforce
and allows you to pull data out of SFDC and save it to a file.
You can then use that file with other
Airflow operators to move the data into another data source
:param conn_id: the name of the connection that has the parameters
we need to connect to Salesforce.
The connection should be type `http` and include a
user's security token in the `Extras` field.
.. note::
For the HTTP connection type, you can include a
JSON structure in the `Extras` field.
We need a user's security token to connect to Salesforce.
So we define it in the `Extras` field as:
`{"security_token":"YOUR_SECURITY_TOKEN"}`
"""
self.conn_id = conn_id
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson
def sign_in(self):
"""
Sign into Salesforce.
If we have already signed in, this will just return the original object
"""
if hasattr(self, 'sf'):
return self.sf
# connect to Salesforce
sf = Salesforce(
username=self.connection.login,
password=self.connection.password,
security_token=self.extras['security_token'],
instance_url=self.connection.host,
sandbox=self.extras.get('sandbox', False)
)
self.sf = sf
return sf
def make_query(self, query):
"""
Make a query to Salesforce. Returns result in dictionary
:param query: The query to make to Salesforce
"""
self.sign_in()
self.log.info("Querying for all objects")
query = self.sf.query_all(query)
self.log.info(
"Received results: Total size: %s; Done: %s",
query['totalSize'], query['done']
)
query = json.loads(json.dumps(query))
return query
def describe_object(self, obj):
"""
Get the description of an object from Salesforce.
This description is the object's schema
and some extra metadata that Salesforce stores for each object
:param obj: Name of the Salesforce object
that we are getting a description of.
"""
self.sign_in()
return json.loads(json.dumps(self.sf.__getattr__(obj).describe()))
def get_available_fields(self, obj):
"""
Get a list of all available fields for an object.
This only returns the names of the fields.
"""
self.sign_in()
desc = self.describe_object(obj)
return [f['name'] for f in desc['fields']]
@staticmethod
def _build_field_list(fields):
# join all of the fields in a comma separated list
return ",".join(fields)
def get_object_from_salesforce(self, obj, fields):
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
"""
field_string = self._build_field_list(fields)
query = "SELECT {0} FROM {1}".format(field_string, obj)
self.log.info(
"Making query to Salesforce: %s",
query if len(query) < 30 else " ... ".join([query[:15], query[-15:]])
)
return self.make_query(query)
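# Illustrative usage sketch (the connection id, object name and field names below are
# hypothetical):
#     hook = SalesforceHook(conn_id='salesforce_default')
#     results = hook.get_object_from_salesforce('Lead', ['Id', 'Email'])
#     hook.write_object_to_file(results['records'], filename='leads.csv', fmt='csv')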
@classmethod
def _to_timestamp(cls, col):
"""
Convert a column of a dataframe to UNIX timestamps if applicable
:param col: A Series object representing a column of a dataframe.
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
col = pd.to_datetime(col)
except ValueError:
log = LoggingMixin().log
log.warning(
"Could not convert field to timestamps: %s", col.name
)
return col
# now convert the newly created datetimes into timestamps
# we have to be careful here
# because NaT cannot be converted to a timestamp
# so we have to return NaN
converted = []
for i in col:
try:
converted.append(i.timestamp())
except ValueError:
converted.append(pd.np.NaN)
except AttributeError:
converted.append(pd.np.NaN)
# return a new series that maintains the same index as the original
return pd.Series(converted, index=col.index)
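# Illustrative behaviour sketch (hypothetical input): a column such as
# pd.Series(["2017-01-01", None]) is parsed to datetimes and returned as epoch seconds with
# NaN in place of the NaT entry, while a column that cannot be parsed at all is returned
# untouched by the except branch above.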
def write_object_to_file(
self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False
):
"""
Write query results to file.
Acceptable formats are:
- csv:
comma-separated-values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
JSON array but each element is new-line delimited
instead of comma delimited like in `json`
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
Pandas wants to write them as strings in CSV,
but as millisecond Unix timestamps in JSON.
By default, this function will try and leave all values as
they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes
to become Unix timestamps (UTC).
This can be greatly beneficial as it will make all of your
datetime fields look the same,
and makes it easier to work with in other database environments
:param query_results: the results from a SQL query
:param filename: the name of the file where the data
should be dumped to
:param fmt: the format you want the output in.
*Default:* csv.
:param coerce_to_timestamp: True if you want all datetime fields to be
converted into Unix timestamps.
False if you want them to be left in the
same format as they were in Salesforce.
Leaving the value as False will result
in datetimes being strings.
*Defaults to False*
:param record_time_added: *(optional)* True if you want to add a
Unix timestamp field to the resulting data
that marks when the data
was fetched from Salesforce.
*Default: False*.
"""
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {0}".format(fmt))
# this line right here will convert all integers to floats if there are
# any None/np.nan values in the column
# that's because None/np.nan cannot exist in an integer column
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [c.lower() for c in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
# features that are DATE or DATETIME
if coerce_to_timestamp and df.shape[0] > 0:
# get the object name out of the query results
# it's stored in the "attributes" dictionary
# for each returned record
object_name = query_results[0]['attributes']['type']
self.log.info("Coercing timestamps for: %s", object_name)
schema = self.describe_object(object_name)
# possible columns that can be converted to timestamps
# are the ones that are either date or datetime types
# strings are too general and we risk unintentional conversion
possible_timestamp_cols = [
i['name'].lower()
for i in schema['fields']
if i['type'] in ["date", "datetime"] and
i['name'].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(
lambda x: self._to_timestamp(x)
)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
# write the CSV or JSON file depending on the option
# NOTE:
# datetimes here are an issue.
# There is no good way to manage the difference
# for to_json, the options are an epoch or an ISO string
# but for to_csv, it will be a string output by datetime
# For JSON we decided to output the epoch timestamp in seconds
# (as is fairly standard for JavaScript)
# And for csv, we do a string
if fmt == "csv":
# there are also a ton of newline objects
# that mess up our ability to write to csv
# we remove these newlines so that the output is a valid CSV format
self.log.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\r\n", "")
)
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\n", "")
)
# write the dataframe
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df
|
apache-2.0
|
alekseik1/python_mipt_study_1-2
|
1sem/lesson_6/equalize.py
|
2
|
2844
|
import numpy as np
import random
import matplotlib.pyplot as plt
import pylab
def get_percentile(values, bucket_number):
p = 100/bucket_number
res = []
count = 0
res.append(0.0)
count += p
while count < 100:
res.append(np.percentile(values, count))
count += p
return res
def get_percentile_number(value, percentiles):
i = 0
while percentiles[i] <= value:
i += 1
if i >= len(percentiles):
i -= 1
return i
return i-1
def value_equalization(value, percentiles, addRandom=False, add_random=False):
if add_random: addRandom = True
if not addRandom:
idx = get_percentile_number(value, percentiles)
step = 1 / len(percentiles)
value = idx*step
return value
else:
idx = get_percentile_number(value, percentiles)
step = 1 / len(percentiles)
random_noise = random.uniform(0, step)
value = idx*step + random_noise
return value
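# Illustrative worked example (hypothetical percentile boundaries): with
# percentiles = [0.0, 2.5, 5.0, 7.5], value_equalization(6.0, percentiles) returns 0.5,
# since 6.0 falls in bucket index 2 and the step is 1 / 4 = 0.25; with addRandom=True a
# uniform noise term between 0 and 0.25 is added on top.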
def values_equalization(values, percentiles, addRandom=False, add_random=False):
if add_random: addRandom = True
res = []
for i in values:
res.append(value_equalization(i, percentiles, addRandom))
return res
v = [3, 4, 1, 2, 5, 6, 7, 8, 9, 10]
p = (get_percentile(v, 4))
#print(p)
#print(get_percentile_number(7.75, p))
#print(get_percentile_number(5.5, p))
#print(values_equalization(v, p))
#print(values_equalization(v, p, addRandom=True))
s = []
with open('img.txt', 'r') as f:
for line in f:
v = list(map(float, line.strip().split()))
s.append(v)
#print(s)
data = np.array(s)
plt.subplot(221)
plt.imshow(data, cmap=plt.get_cmap('gray'))
plt.subplot(222)
val = [data.flatten()]
plt.hist(val, bins=10)
#n = int(input())
p = get_percentile(val, 3)
new_data = np.array(values_equalization(data.flatten(), p, addRandom=True))
ready = new_data.reshape(200, 267)
if __name__=="__main__":
for i in range(1, 50):
data = np.array(s)
p = get_percentile(val, i)
new_data = np.array(values_equalization(data.flatten(), p, addRandom=True))
ready = new_data.reshape(200, 267)
plt.subplot(223)
plt.imshow(ready, cmap=plt.get_cmap('gray'))
pylab.pause(1)
new_data = np.array(values_equalization(data.flatten(), p, addRandom=True))
ready = new_data.reshape(200, 267)
plt.subplot(224)
data = [ready.flatten()]
plt.hist(data, bins=10)
# plt.subplot(325)
plt.show()
else:
data = np.array(s)
p = get_percentile(val, 10)
new_data = np.array(values_equalization(data.flatten(), p, addRandom=True))
ready = new_data.reshape(200, 267)
new_data = np.array(values_equalization(data.flatten(), p, addRandom=True))
ready = new_data.reshape(200, 267)
data = [ready.flatten()]
|
gpl-3.0
|
sychen/rst2pdf
|
setup.py
|
6
|
3121
|
# -*- coding: utf-8 -*-
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
import os
from setuptools import setup, find_packages
version = '0.93'
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = (
read('LICENSE.txt')
+ '\n' +
'Detailed Documentation\n'
'**********************\n'
+ '\n' +
read('README.txt')
+ '\n' +
'Contributors\n'
'************\n'
+ '\n' +
read('Contributors.txt')
+ '\n' +
'Change history\n'
'**************\n'
+ '\n' +
read('CHANGES.txt')
+ '\n' +
'Download\n'
'********\n'
)
install_requires = [
'setuptools',
'docutils',
'reportlab>=2.4',
'Pygments',
'pdfrw',
]
try:
import json
except ImportError:
install_requires.append('simplejson')
tests_require = ['pyPdf']
sphinx_require = ['sphinx']
hyphenation_require = ['wordaxe>=1.0']
images_require = ['PIL']
pdfimages_require = ['pyPdf','PythonMagick']
pdfimages2_require = ['pyPdf','SWFTools']
svgsupport_require = ['svg2rlg']
aafiguresupport_require = ['aafigure>=0.4']
mathsupport_require = ['matplotlib']
rawhtmlsupport_require = ['xhtml2pdf']
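# Editorial note (not part of the original setup script): each of the
# *_require lists above is exposed as an "extra" in extras_require below,
# so optional features are installed with the usual setuptools syntax, e.g.
#     pip install rst2pdf[svgsupport,mathsupport]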
setup(
name="rst2pdf",
version=version,
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
package_data=dict(rst2pdf=['styles/*.json',
'styles/*.style',
'images/*png',
'images/*jpg',
'templates/*tmpl'
]),
include_package_data=True,
dependency_links=[
],
install_requires=install_requires,
tests_require=tests_require,
extras_require=dict(
tests=tests_require,
sphinx=sphinx_require,
hyphenation=hyphenation_require,
images=images_require,
pdfimages=pdfimages_require,
pdfimages2=pdfimages2_require,
svgsupport=svgsupport_require,
aafiguresupport=aafiguresupport_require,
mathsupport=mathsupport_require,
rawhtmlsupport=rawhtmlsupport_require,
),
# metadata for upload to PyPI
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing',
'Topic :: Utilities',
],
author="Roberto Alsina",
author_email="ralsina at netmanagers dot com dot ar",
description="Convert restructured text to PDF via reportlab.",
long_description=long_description,
license="MIT",
keywords="restructured convert rst pdf docutils pygments reportlab",
url="http://rst2pdf.googlecode.com",
download_url="http://code.google.com/p/rst2pdf/downloads/list",
entry_points={'console_scripts': ['rst2pdf = rst2pdf.createpdf:main']},
test_suite='rst2pdf.tests.test_rst2pdf.test_suite',
)
|
mit
|
ElDeveloper/scikit-learn
|
sklearn/datasets/tests/test_20news.py
|
280
|
3045
|
"""Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
|
bsd-3-clause
|
gfyoung/pandas
|
pandas/tests/arithmetic/conftest.py
|
2
|
5751
|
import numpy as np
import pytest
import pandas as pd
from pandas import Float64Index, Int64Index, RangeIndex, UInt64Index
import pandas._testing as tm
# ------------------------------------------------------------------
# Helper Functions
def id_func(x):
if isinstance(x, tuple):
assert len(x) == 2
return x[0].__name__ + "-" + str(x[1])
else:
return x.__name__
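# Illustrative example (editorial): id_func(pd.Series) yields "Series", while a
# parametrized tuple such as (pd.Series, 2) yields "Series-2"; these strings are
# what appear as the generated pytest test ids for the fixtures below.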
# ------------------------------------------------------------------
@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
def one(request):
"""
Several variants of integer value 1. The zero-dim integer array
behaves like an integer.
This fixture can be used to check that datetimelike indexes handle
addition and subtraction of integers and zero-dimensional arrays
of integers.
Examples
--------
>>> dti = pd.date_range('2016-01-01', periods=2, freq='H')
>>> dti
DatetimeIndex(['2016-01-01 00:00:00', '2016-01-01 01:00:00'],
dtype='datetime64[ns]', freq='H')
>>> dti + one
DatetimeIndex(['2016-01-01 01:00:00', '2016-01-01 02:00:00'],
dtype='datetime64[ns]', freq='H')
"""
return request.param
zeros = [
box_cls([0] * 5, dtype=dtype)
for box_cls in [pd.Index, np.array, pd.array]
for dtype in [np.int64, np.uint64, np.float64]
]
zeros.extend(
[box_cls([-0.0] * 5, dtype=np.float64) for box_cls in [pd.Index, np.array]]
)
zeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]])
zeros.extend([np.array(-0.0, dtype=np.float64)])
zeros.extend([0, 0.0, -0.0])
@pytest.fixture(params=zeros)
def zero(request):
"""
Several types of scalar zeros and length 5 vectors of zeros.
This fixture can be used to check that numeric-dtype indexes handle
division by any zero numeric-dtype.
Uses vector of length 5 for broadcasting with `numeric_idx` fixture,
which creates numeric-dtype vectors also of length 5.
Examples
--------
>>> arr = RangeIndex(5)
>>> arr / zeros
Float64Index([nan, inf, inf, inf, inf], dtype='float64')
"""
return request.param
# ------------------------------------------------------------------
# Vector Fixtures
@pytest.fixture(
params=[
Float64Index(np.arange(5, dtype="float64")),
Int64Index(np.arange(5, dtype="int64")),
UInt64Index(np.arange(5, dtype="uint64")),
RangeIndex(5),
],
ids=lambda x: type(x).__name__,
)
def numeric_idx(request):
"""
Several types of numeric-dtypes Index objects
"""
return request.param
# ------------------------------------------------------------------
# Scalar Fixtures
@pytest.fixture(
params=[
pd.Timedelta("5m4s").to_pytimedelta(),
pd.Timedelta("5m4s"),
pd.Timedelta("5m4s").to_timedelta64(),
],
ids=lambda x: type(x).__name__,
)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(
params=[
pd.offsets.Day(3),
pd.offsets.Hour(72),
pd.Timedelta(days=3).to_pytimedelta(),
pd.Timedelta("72:00:00"),
np.timedelta64(3, "D"),
np.timedelta64(72, "h"),
],
ids=lambda x: type(x).__name__,
)
def three_days(request):
"""
Several timedelta-like and DateOffset objects that each represent
a 3-day timedelta
"""
return request.param
@pytest.fixture(
params=[
pd.offsets.Hour(2),
pd.offsets.Minute(120),
pd.Timedelta(hours=2).to_pytimedelta(),
pd.Timedelta(seconds=2 * 3600),
np.timedelta64(2, "h"),
np.timedelta64(120, "m"),
],
ids=lambda x: type(x).__name__,
)
def two_hours(request):
"""
Several timedelta-like and DateOffset objects that each represent
a 2-hour timedelta
"""
return request.param
_common_mismatch = [
pd.offsets.YearBegin(2),
pd.offsets.MonthBegin(1),
pd.offsets.Minute(),
]
@pytest.fixture(
params=[
pd.Timedelta(minutes=30).to_pytimedelta(),
np.timedelta64(30, "s"),
pd.Timedelta(seconds=30),
]
+ _common_mismatch
)
def not_hourly(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Hourly frequencies.
"""
return request.param
@pytest.fixture(
params=[
np.timedelta64(4, "h"),
pd.Timedelta(hours=23).to_pytimedelta(),
pd.Timedelta("23:00:00"),
]
+ _common_mismatch
)
def not_daily(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Daily frequencies.
"""
return request.param
@pytest.fixture(
params=[
np.timedelta64(365, "D"),
pd.Timedelta(days=365).to_pytimedelta(),
pd.Timedelta(days=365),
]
+ _common_mismatch
)
def mismatched_freq(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Monthly or Annual frequencies.
"""
return request.param
# ------------------------------------------------------------------
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame, pd.array], ids=id_func)
def box_with_array(request):
"""
Fixture to test behavior for Index, Series, DataFrame, and pandas Array
classes
"""
return request.param
@pytest.fixture(params=[pd.Index, pd.Series, tm.to_array, np.array, list], ids=id_func)
def box_1d_array(request):
"""
Fixture to test behavior for Index, Series, tm.to_array, numpy Array and list
classes
"""
return request.param
# alias so we can use the same fixture for multiple parameters in a test
box_with_array2 = box_with_array
|
bsd-3-clause
|
ctorney/socialInfluence
|
py_analysis/simSquare.py
|
1
|
4072
|
#!/usr/bin/python
import sympy as sp
import scipy as sc
from scipy.signal import lfilter
from IPython.display import display
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
import math  # needed for the explicit math.erf / math.sqrt calls below
from math import *
import matplotlib as mpl
from pylab import *
K = 8
wg = 0.875
ws = 0.65
def psw( j ):
    # switching probability used in the mean-field calculation, as a function
    # of the number j of "up" neighbours (out of K)
    gc = np.log(ws/(1-ws))*(K-2*j)/(4*wg)
    return 0.5 + 0.5*math.erf(math.sqrt(wg)*(1.0-gc))
# Mean-field transition probabilities: tup(x) / tdown(x) give the probability
# that the global fraction of "up" sites increases / decreases when the
# current fraction is x.
def tup( x ):
    return (1-x) * sum(sp.binomial(K,j) * x**j * (1-x)**(K-j) * psw(j) for j in xrange(0,K+1))
def tdown( x ):
    return (x) * (1 - sum(sp.binomial(K,j) * x**j * (1-x)**(K-j) * psw(j) for j in xrange(0,K+1)))
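# Editorial sketch (not part of the original script): the analytic tup/tdown
# curves defined above can be evaluated on the same density grid as the
# simulation and overlaid on the plots produced at the bottom of this file.
def plot_analytic_rates(n_points=65):
    """Plot the mean-field up/down transition rates on an n_points density grid."""
    x_grid = np.arange(n_points) / float(n_points - 1)
    t_up = [float(tup(float(x))) for x in x_grid]
    t_down = [float(tdown(float(x))) for x in x_grid]
    plt.plot(x_grid, t_up, 'k-', label='analytic up')
    plt.plot(x_grid, t_down, 'k--', label='analytic down')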
Nx = 8
NA = Nx * Nx
NT=5000
alpha = 1
wgs = 0.01 + wg*1.0*(rand(Nx, Nx)<14.0/64.0)
a = exp(-wgs)
b = (1-a)
c = sqrt((1.0-a*a)/(2.0*wgs))
for acount in range(10,11):
alpha = 0.0#0.1*float(acount)
counts = np.zeros(NA+1)
ups = np.zeros(NA+1)
downs = np.zeros(NA+1)
ccounts = np.zeros(NA+1)
dcounts = np.zeros(NA+1)
cups = np.zeros(NA+1)
cdowns = np.zeros(NA+1)
conds = np.zeros(NA+1)
for av in range(100):
print av
G=np.ones((Nx,Nx))
u=np.zeros((Nx,Nx))
nextCount = np.sum(u)
for t in range(1,NT):
thisCount = nextCount
G = a*G + (1.0-a) + c * randn(Nx,Nx)
ix = randint(0,Nx)
iy = randint(0,Nx)
i = iy * Nx + ix
neigh = np.array( ( ( 1, 1 ), ( 1, 0 ), ( 1, -1 ) , ( 0, 1 ), ( 0, -1 ), ( -1, -1 ) , ( -1, 0 ), ( -1, 1 ) ))
deltan = 0
for e in range(K):
if rand()<alpha:
n2 = randint(0,K)
x_n = (((ix + neigh[n2,0]) % Nx) + Nx) % Nx;
y_n = (((iy + neigh[n2,1]) % Nx) + Nx) % Nx;
if (u[x_n,y_n]>0.5):
deltan = deltan + 1
else:
x_n = randint(0,Nx-1)
y_n = randint(0,Nx-1)
if (u[x_n,y_n]>0.5):
deltan = deltan + 1
if (u[ix,iy] >0.5):
cups[thisCount] += deltan / float(K)
ccounts[thisCount]+=1
else:
cdowns[thisCount] += deltan / float(K)
dcounts[thisCount]+=1
conds[thisCount] += deltan / float(K)
deltan = deltan*2
deltan = deltan - K
pup = exp(-4.0*wgs[ix,iy]*G[ix,iy])
pall = pup*(((1.0 - ws)/ws)**deltan)
if (pall<1.0):
u[ix,iy] = 1
else:
u[ix,iy] = 0
counts[thisCount]+=1
nextCount = np.sum(u)
if (nextCount>thisCount):
ups[thisCount]+=1
if (nextCount<thisCount):
downs[thisCount]+=1
if t % 100000000 == 0 :
print np.sum(u)
#plt.imshow(u, extent=[0,1,0,1], aspect='equal', vmin=0, vmax=1)
#plt.set_cmap('hot')
#fileName = "/home/ctorney/tmp_frames/" + '{0:05d}'.format(t) +".png"
#plt.savefig(fileName)
ups = np.divide(ups,counts)
downs = np.divide(downs,counts)
cups = np.divide(cups,ccounts)
cdowns = np.divide(cdowns,dcounts)
conds = np.divide(conds,counts)
xGrid=np.arange(65)/64.0
plt.plot(xGrid,ups,marker='o',label='sim up')
    plt.plot(xGrid,downs,marker='o',label='sim down')
outdata = np.vstack((ups,downs))
# for r in ups: print r
# for r in downs: print r
#outfile = "potential1" + '{0:02d}'.format(int(acount)) +".npy"
#np.save(outfile, outdata)
|
mit
|
rahuldhote/scikit-learn
|
sklearn/utils/testing.py
|
84
|
24860
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
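# Example usage (editorial sketch; `make_noise` is a hypothetical function that
# issues a UserWarning): assert_warns raises AssertionError if no warning of the
# requested class is emitted, and otherwise returns the wrapped function's result.
#
#     result = assert_warns(UserWarning, make_noise, "some", "args")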
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
Name of the estimator
func : callable
        Callable object to raise error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
    'data'; take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that cannot be default-constructed sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
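# Example usage (editorial sketch): restricting to one estimator type returns
# sorted (name, class) pairs, e.g.
#
#     classifiers = all_estimators(type_filter='classifier')
#     names = [name for name, cls in classifiers]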
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
    Copy from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
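# Example usage (editorial sketch; `X`, `y` and `estimator` are hypothetical
# objects defined by the caller): the context manager dumps the data to a
# temporary folder and yields a read-only memory-mapped copy of it.
#
#     with TempMemmap(X) as X_readonly:
#         estimator.fit(X_readonly, y)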
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
|
bsd-3-clause
|
ZENGXH/scikit-learn
|
examples/linear_model/plot_ard.py
|
248
|
2622
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
bsd-3-clause
|
mzivi/Libra
|
LogisticRegression.py
|
1
|
2100
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 22 15:49:38 2015
@author: Michele
"""
import numpy as np
import random as rnd
class LogisticRegression(object):
"""Logistic regression based on sigmoid function and trained via gradient
descent method.
"""
def __init__(self, theta=None):
"""Provide the initial values for the parameters. If theta is not given,
it will be inferred from the design matrix.
"""
self._theta = theta
def _sigmoid(self, x):
return 1 / (1 + np.exp(-x))
@property
def theta(self):
return self._theta
def train(self, x, y, alpha, stop_thr, max_iter = 100):
if not self._theta:
# TODO: this should actually init to random epsilons
self._theta = np.array([rnd.random() * 0.1 for i in xrange(x.shape[1])])
# TODO: elsif check theta is consistent with x
m = x.shape[0]
h = self._sigmoid(np.dot(x, self._theta))
        cost_fn = np.empty(max_iter + 1)  # slot 0 holds the initial cost, plus one slot per iteration
cost_fn[0] = np.sum(- y * np.log(h) - (1 - y) * np.log(1 - h)) / m
for i in xrange(max_iter):
cost_grad = np.sum(x * (h-y)[:, np.newaxis], axis=0) / m
self._theta = self._theta - alpha * cost_grad
h = self._sigmoid(np.dot(x, self._theta))
cost_fn[i+1] = np.sum(- y * np.log(h) - (1 - y) * np.log(1 - h)) / m
if np.abs(cost_fn[i] - cost_fn[i+1]) < stop_thr:
                cost_fn = np.resize(cost_fn, i + 2)  # keep every cost computed so far, including iteration i
break
return cost_fn
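    # The loop above implements plain batch gradient descent on the
    # cross-entropy cost
    #     J(theta) = -(1/m) * sum_i [ y_i*log(h_i) + (1-y_i)*log(1-h_i) ],
    # whose gradient is (1/m) * X^T (h - y); each step updates
    # theta <- theta - alpha * grad until the cost change drops below stop_thr.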
def predict(self, x, thr=0.5):
if self._theta is None:
return
return self._sigmoid(np.dot(x, self._theta)) > thr
if __name__ == '__main__':
from matplotlib.pyplot import plot
l = LogisticRegression()
x = np.array([[1., 1.],[1., 2.], [1., 1.1],[1.,1.9]], dtype=float)
y = np.array([1, 0, 1, 0], dtype=float).T
rnd.seed(17)
cost_fn = l.train(x, y, alpha=0.5, stop_thr=0.0001, max_iter=10000)
plot(cost_fn)
print l.theta
print l.predict(x)
|
mit
|
dkushner/zipline
|
zipline/finance/performance/tracker.py
|
14
|
23349
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performance Tracking
====================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| period_start | The beginning of the period to be tracked. datetime|
| | in pytz.utc timezone. Will always be 0:00 on the |
| | date in UTC. The fact that the time may be on the |
| | prior day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| period_end | The end of the period to be tracked. datetime |
| | in pytz.utc timezone. Will always be 23:59 on the |
| | date in UTC. The fact that the time may be on the |
| | next day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| progress | percentage of test completed |
+-----------------+----------------------------------------------------+
| capital_base | The initial capital assumed for this tracker. |
+-----------------+----------------------------------------------------+
| cumulative_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
+-----------------+----------------------------------------------------+
| todays_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker with datetime stamps between last_open|
| | and last_close. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
| | TODO: adding this because we calculate it. May be |
| | overkill. |
+-----------------+----------------------------------------------------+
| cumulative_risk | A dictionary representing the risk metrics |
| _metrics | calculated based on the positions aggregated |
| | through all the events delivered to this tracker. |
| | For details look at the comments for |
| | :py:meth:`zipline.finance.risk.RiskMetrics.to_dict`|
+-----------------+----------------------------------------------------+
"""
from __future__ import division
import logbook
import pickle
from six import iteritems
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
import zipline.finance.risk as risk
from zipline.finance.trading import TradingEnvironment
from . period import PerformancePeriod
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
from . position_tracker import PositionTracker
log = logbook.Logger('Performance')
class PerformanceTracker(object):
"""
Tracks the performance of the algorithm.
"""
def __init__(self, sim_params):
self.sim_params = sim_params
env = TradingEnvironment.instance()
self.period_start = self.sim_params.period_start
self.period_end = self.sim_params.period_end
self.last_close = self.sim_params.last_close
first_open = self.sim_params.first_open.tz_convert(env.exchange_tz)
self.day = pd.Timestamp(datetime(first_open.year, first_open.month,
first_open.day), tz='UTC')
self.market_open, self.market_close = env.get_open_and_close(self.day)
self.total_days = self.sim_params.days_in_period
self.capital_base = self.sim_params.capital_base
self.emission_rate = sim_params.emission_rate
all_trading_days = env.trading_days
mask = ((all_trading_days >= normalize_date(self.period_start)) &
(all_trading_days <= normalize_date(self.period_end)))
self.trading_days = all_trading_days[mask]
self.dividend_frame = pd.DataFrame()
self._dividend_count = 0
self.position_tracker = PositionTracker()
self.perf_periods = []
if self.emission_rate == 'daily':
self.all_benchmark_returns = pd.Series(
index=self.trading_days)
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params)
elif self.emission_rate == 'minute':
self.all_benchmark_returns = pd.Series(index=pd.date_range(
self.sim_params.first_open, self.sim_params.last_close,
freq='Min'))
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params,
create_first_day_stats=True)
self.minute_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the
# entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
                # don't serialize positions for cumulative period
serialize_positions=False
)
self.minute_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.minute_performance)
# this performance period will span the entire simulation from
# inception.
self.cumulative_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
            # don't serialize positions for cumulative period
serialize_positions=False,
)
self.cumulative_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.cumulative_performance)
# this performance period will span just the current market day
self.todays_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the daily period will be calculated for the market day
self.market_open,
self.market_close,
keep_transactions=True,
keep_orders=True,
serialize_positions=True,
)
self.todays_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.todays_performance)
self.saved_dt = self.period_start
# one indexed so that we reach 100%
self.day_count = 0.0
self.txn_count = 0
self.account_needs_update = True
self._account = None
def __repr__(self):
return "%s(%r)" % (
self.__class__.__name__,
{'simulation parameters': self.sim_params})
@property
def progress(self):
if self.emission_rate == 'minute':
# Fake a value
return 1.0
elif self.emission_rate == 'daily':
return self.day_count / self.total_days
def set_date(self, date):
if self.emission_rate == 'minute':
self.saved_dt = date
self.todays_performance.period_close = self.saved_dt
def update_dividends(self, new_dividends):
"""
Update our dividend frame with new dividends. @new_dividends should be
a DataFrame with columns containing at least the entries in
zipline.protocol.DIVIDEND_FIELDS.
"""
# Mark each new dividend with a unique integer id. This ensures that
# we can differentiate dividends whose date/sid fields are otherwise
# identical.
new_dividends['id'] = np.arange(
self._dividend_count,
self._dividend_count + len(new_dividends),
)
self._dividend_count += len(new_dividends)
self.dividend_frame = pd.concat(
[self.dividend_frame, new_dividends]
).sort(['pay_date', 'ex_date']).set_index('id', drop=False)
def initialize_dividends_from_other(self, other):
"""
Helper for copying dividends to a new PerformanceTracker while
preserving dividend count. Useful if a simulation needs to create a
new PerformanceTracker mid-stream and wants to preserve stored dividend
info.
Note that this does not copy unpaid dividends.
"""
self.dividend_frame = other.dividend_frame
self._dividend_count = other._dividend_count
def handle_sid_removed_from_universe(self, sid):
"""
This method handles any behaviors that must occur when a SID leaves the
universe of the TradingAlgorithm.
Parameters
        ----------
sid : int
The sid of the Asset being removed from the universe.
"""
# Drop any dividends for the sid from the dividends frame
self.dividend_frame = self.dividend_frame[
self.dividend_frame.sid != sid
]
def update_performance(self):
# calculate performance as of last trade
for perf_period in self.perf_periods:
perf_period.calculate_performance()
def get_portfolio(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
return self.cumulative_performance.as_portfolio()
def get_account(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
if self.account_needs_update:
self._update_account()
return self._account
def _update_account(self):
self._account = self.cumulative_performance.as_account()
self.account_needs_update = False
def to_dict(self, emission_type=None):
"""
Creates a dictionary representing the state of this tracker.
Returns a dict object of the form described in header comments.
"""
# Default to the emission rate of this tracker if no type is provided
if emission_type is None:
emission_type = self.emission_rate
_dict = {
'period_start': self.period_start,
'period_end': self.period_end,
'capital_base': self.capital_base,
'cumulative_perf': self.cumulative_performance.to_dict(),
'progress': self.progress,
'cumulative_risk_metrics': self.cumulative_risk_metrics.to_dict()
}
if emission_type == 'daily':
_dict['daily_perf'] = self.todays_performance.to_dict()
elif emission_type == 'minute':
_dict['minute_perf'] = self.todays_performance.to_dict(
self.saved_dt)
else:
raise ValueError("Invalid emission type: %s" % emission_type)
return _dict
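    # Shape sketch (editorial, values are placeholders): a daily emission packet
    # looks roughly like
    #     {'period_start': Timestamp(...), 'period_end': Timestamp(...),
    #      'capital_base': 100000.0, 'progress': 0.5,
    #      'cumulative_perf': {...}, 'cumulative_risk_metrics': {...},
    #      'daily_perf': {...}}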
def process_trade(self, event):
# update last sale, and pay out a cash adjustment
cash_adjustment = self.position_tracker.update_last_sale(event)
if cash_adjustment != 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(cash_adjustment)
def process_transaction(self, event):
self.txn_count += 1
self.position_tracker.execute_transaction(event)
for perf_period in self.perf_periods:
perf_period.handle_execution(event)
def process_dividend(self, dividend):
log.info("Ignoring DIVIDEND event.")
def process_split(self, event):
leftover_cash = self.position_tracker.handle_split(event)
if leftover_cash > 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(leftover_cash)
def process_order(self, event):
for perf_period in self.perf_periods:
perf_period.record_order(event)
def process_commission(self, event):
self.position_tracker.handle_commission(event)
for perf_period in self.perf_periods:
perf_period.handle_commission(event)
def process_benchmark(self, event):
if self.sim_params.data_frequency == 'minute' and \
self.sim_params.emission_rate == 'daily':
# Minute data benchmarks should have a timestamp of market
# close, so that calculations are triggered at the right time.
# However, risk module uses midnight as the 'day'
# marker for returns, so adjust back to midnight.
midnight = pd.tseries.tools.normalize_date(event.dt)
else:
midnight = event.dt
if midnight not in self.all_benchmark_returns.index:
raise AssertionError(
("Date %s not allocated in all_benchmark_returns. "
"Calendar seems to mismatch with benchmark. "
"Benchmark container is=%s" %
(midnight,
self.all_benchmark_returns.index)))
self.all_benchmark_returns[midnight] = event.returns
def process_close_position(self, event):
# CLOSE_POSITION events that contain prices that must be handled as
# a final trade event
if 'price' in event:
self.process_trade(event)
txn = self.position_tracker.\
maybe_create_close_position_transaction(event)
if txn:
self.process_transaction(txn)
def check_upcoming_dividends(self, next_trading_day):
"""
Check if we currently own any stocks with dividends whose ex_date is
        the next trading day. Track how much we should be paid on those
dividends' pay dates.
Then check if we are owed cash/stock for any dividends whose pay date
is the next trading day. Apply all such benefits, then recalculate
performance.
"""
if len(self.dividend_frame) == 0:
# We don't currently know about any dividends for this simulation
# period, so bail.
return
# Dividends whose ex_date is the next trading day. We need to check if
# we own any of these stocks so we know to pay them out when the pay
# date comes.
ex_date_mask = (self.dividend_frame['ex_date'] == next_trading_day)
dividends_earnable = self.dividend_frame[ex_date_mask]
# Dividends whose pay date is the next trading day. If we held any of
# these stocks on midnight before the ex_date, we need to pay these out
# now.
pay_date_mask = (self.dividend_frame['pay_date'] == next_trading_day)
dividends_payable = self.dividend_frame[pay_date_mask]
position_tracker = self.position_tracker
if len(dividends_earnable):
position_tracker.earn_dividends(dividends_earnable)
if not len(dividends_payable):
return
net_cash_payment = position_tracker.pay_dividends(dividends_payable)
for period in self.perf_periods:
# notify periods to update their stats
period.handle_dividends_paid(net_cash_payment)
def check_asset_auto_closes(self, next_trading_day):
"""
Check if the position tracker currently owns any Assets with an
auto-close date that is the next trading day. Close those positions.
Parameters
----------
next_trading_day : pandas.Timestamp
The next trading day of the simulation
"""
auto_close_events = self.position_tracker.auto_close_position_events(
next_trading_day=next_trading_day
)
for event in auto_close_events:
self.process_close_position(event)
def handle_minute_close(self, dt):
"""
Handles the close of the given minute. This includes handling
market-close functions if the given minute is the end of the market
day.
Parameters
        ----------
dt : Timestamp
The minute that is ending
Returns
        -------
(dict, dict/None)
A tuple of the minute perf packet and daily perf packet.
If the market day has not ended, the daily perf packet is None.
"""
self.update_performance()
todays_date = normalize_date(dt)
account = self.get_account(False)
self.minute_performance.rollover()
bench_returns = self.all_benchmark_returns.loc[todays_date:dt]
# cumulative returns
bench_since_open = (1. + bench_returns).prod() - 1
self.cumulative_risk_metrics.update(todays_date,
self.todays_performance.returns,
bench_since_open,
account)
minute_packet = self.to_dict(emission_type='minute')
# if this is the close, update dividends for the next day.
# Return the performance tuple
if dt == self.market_close:
return (minute_packet, self._handle_market_close(todays_date))
else:
return (minute_packet, None)
def handle_market_close_daily(self):
"""
Function called after handle_data when running with daily emission
rate.
"""
self.update_performance()
completed_date = self.day
account = self.get_account(False)
# update risk metrics for cumulative performance
self.cumulative_risk_metrics.update(
completed_date,
self.todays_performance.returns,
self.all_benchmark_returns[completed_date],
account)
return self._handle_market_close(completed_date)
def _handle_market_close(self, completed_date):
# increment the day counter before we move markers forward.
self.day_count += 1.0
# Get the next trading day and, if it is past the bounds of this
# simulation, return the daily perf packet
next_trading_day = TradingEnvironment.instance().\
next_trading_day(completed_date)
# Check if any assets need to be auto-closed before generating today's
# perf period
if next_trading_day:
self.check_asset_auto_closes(next_trading_day=next_trading_day)
# Take a snapshot of our current performance to return to the
# browser.
daily_update = self.to_dict(emission_type='daily')
# On the last day of the test, don't create tomorrow's performance
# period. We may not be able to find the next trading day if we're at
# the end of our historical data
if self.market_close >= self.last_close:
return daily_update
# move the market day markers forward
env = TradingEnvironment.instance()
self.market_open, self.market_close = \
env.next_open_and_close(self.day)
self.day = env.next_trading_day(self.day)
# Roll over positions to current day.
self.todays_performance.rollover()
self.todays_performance.period_open = self.market_open
self.todays_performance.period_close = self.market_close
# If the next trading day is irrelevant, then return the daily packet
if (next_trading_day is None) or (next_trading_day >= self.last_close):
return daily_update
# Check for any dividends and auto-closes, then return the daily perf
# packet
self.check_upcoming_dividends(next_trading_day=next_trading_day)
return daily_update
def handle_simulation_end(self):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log_msg = "Simulated {n} trading days out of {m}."
log.info(log_msg.format(n=int(self.day_count), m=self.total_days))
log.info("first open: {d}".format(
d=self.sim_params.first_open))
log.info("last close: {d}".format(
d=self.sim_params.last_close))
bms = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.benchmark_returns_cont)
ars = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.algorithm_returns_cont)
acl = self.cumulative_risk_metrics.algorithm_cumulative_leverages
self.risk_report = risk.RiskReport(
ars,
self.sim_params,
benchmark_returns=bms,
algorithm_leverages=acl)
risk_dict = self.risk_report.to_dict()
return risk_dict
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
state_dict['dividend_frame'] = pickle.dumps(self.dividend_frame)
state_dict['_dividend_count'] = self._dividend_count
# we already store perf periods as attributes
del state_dict['perf_periods']
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("PerformanceTracker saved state is too old.")
self.__dict__.update(state)
# Handle the dividend frame specially
self.dividend_frame = pickle.loads(state['dividend_frame'])
# properly setup the perf periods
self.perf_periods = []
p_types = ['cumulative', 'todays', 'minute']
for p_type in p_types:
name = p_type + '_performance'
period = getattr(self, name, None)
if period is None:
continue
period._position_tracker = self.position_tracker
self.perf_periods.append(period)
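# ---------------------------------------------------------------------------
# Illustrative sketch (not part of zipline): the versioned __getstate__/__setstate__
# pattern used above, reduced to a minimal standalone class. The class and attribute
# names here are invented; only the version label and the "too old" check mirror the
# tracker's serialization logic.
class _VersionedStateExample(object):
    _STATE_VERSION = 3

    def __init__(self, value=0):
        self.value = value

    def __getstate__(self):
        # keep only public attributes, then stamp the state with a version number
        state = {k: v for k, v in self.__dict__.items() if not k.startswith('_')}
        state['version'] = self._STATE_VERSION
        return state

    def __setstate__(self, state):
        # refuse to restore state written by an older, incompatible version
        if state.pop('version') < self._STATE_VERSION:
            raise ValueError("saved state is too old")
        self.__dict__.update(state)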
|
apache-2.0
|
sambiak/recommandation-film
|
ml-latest-small/etude_efficacite.py
|
1
|
7005
|
from random import randrange
import numpy as np
import math
import matplotlib.pyplot as plt
from movielens import tableau_des_notes
from recomendation_system_final import descente_du_gradient_2
import time
def nbre_de_film_vu_par_utilisateur(array,i):
"""
    :param array: ratings array with users as rows and films as columns
    :param i: index of the i-th user
    :return: number of films rated by that user
"""
k=0
for j in array[i,:]:
if not math.isnan(j) :
k+=1
return k
def notes_extraites_pour_validation(tableau_des_notes_entier):
"""
    :param tableau_des_notes_entier: full ratings array with users as rows and films as columns
    :return: ratings array with 10% of the ratings removed (validation array), and the list of (user, film) pairs identifying the removed ratings
"""
na_n = float('nan')
T_validation=tableau_des_notes_entier.copy()
L_validation = []
for n_eme_note_de_validation in range (9922):
numero_utilisateur = randrange (0,670)
while nbre_de_film_vu_par_utilisateur(tableau_des_notes_entier,numero_utilisateur) < 10:
numero_utilisateur = randrange (0,670)
numero_film = randrange (1,9125)
compteur = [0,0]
for i, film in enumerate(T_validation[numero_utilisateur]):
if not math.isnan(film):
compteur = [compteur[0]+1,i]
if compteur[0] == numero_film:
                    T_validation[numero_utilisateur][i] = na_n
compteur = [0,0]
L_validation.append([numero_utilisateur,i])
break
if compteur[0] != 0:
T_validation[numero_utilisateur][compteur[1]] = na_n
L_validation.append([numero_utilisateur,compteur[1]])
compteur = [0,0]
return T_validation, L_validation
def notes_extraites_pour_test(tableau_des_notes_validation,tableau_des_notes_entier):
"""
    :param tableau_des_notes_validation: ratings array (minus 10% of the ratings) used for tuning
    :param tableau_des_notes_entier: full ratings array with users as rows and films as columns
    :return: ratings array with another 10% of the ratings removed (different from the validation ones), and the list of (user, film) pairs identifying the removed ratings
"""
na_n = float('nan')
T_validation = tableau_des_notes_validation.copy()
T_test=tableau_des_notes_entier.copy()
L_test = []
for n_eme_note_de_validation in range (9922):
numero_utilisateur = randrange (0,670)
while nbre_de_film_vu_par_utilisateur(tableau_des_notes_validation,numero_utilisateur) < 10:
numero_utilisateur = randrange (0,670)
numero_film = randrange (1,9125)
compteur = [0,0]
for i, film in enumerate(T_validation[numero_utilisateur]):
if not math.isnan (film):
compteur = [compteur[0]+1,i]
if compteur[0] == numero_film:
T_test[numero_utilisateur][i] = na_n
compteur = [0,0]
L_test.append([numero_utilisateur,i])
break
if compteur[0] != 0:
T_test[numero_utilisateur][compteur[1]] = na_n
L_test.append([numero_utilisateur,compteur[1]])
compteur = [0,0]
return T_test, L_test
def ecart_quadratique(L_notes_enlevees,Y_predit,tableau_des_notes_entier):
"""
    :param L_notes_enlevees: list of (user, film) pairs identifying the ratings removed from the array
    :param Y_predit: full predicted ratings array obtained by gradient descent on the validation array
    :param tableau_des_notes_entier: original ratings array with users as rows and films as columns
    :return: mean squared error over the removed ratings
"""
somme = 0
for note in L_notes_enlevees:
somme+= (Y_predit[note[0]][note[1]]-tableau_des_notes_entier[note[0]][note[1]])**2
return somme/9922
def ecart_moyen(L_notes_enlevees,Y_predit,tableau_des_notes_entier):
"""
    :param L_notes_enlevees: list of (user, film) pairs identifying the ratings removed from the array
    :param Y_predit: full predicted ratings array obtained by gradient descent on the validation array
    :param tableau_des_notes_entier: original ratings array with users as rows and films as columns
    :return: mean absolute error over the removed ratings
"""
somme = 0
for note in L_notes_enlevees:
somme+= abs(Y_predit[note[0]][note[1]]-tableau_des_notes_entier[note[0]][note[1]])
return somme/9922
def nb_etapes_optimal(mini, V, X, theta, nb_car, nb_etapes, alpha_X, alpha_theta):
"""
    :param mini: list tracking the smallest error found so far and the settings that produced it
    :param V: ratings array with 10% of the ratings removed plus the list of removed (user, film) pairs
    :param X: current film-feature matrix
    :param theta: current user-feature matrix
    :param nb_car: number of latent features associated with each film
    :param nb_etapes: total number of gradient-descent steps to run
    :param alpha_X: learning rate of the gradient descent for X
    :param alpha_theta: learning rate of the gradient descent for theta
    :print: mean error and step count whenever a new minimum is found
    :show: plot of the mean error as a function of the number of steps
    :return mini: updated minimum
"""
x = []
y = []
n_etape = 50
while n_etape <= nb_etapes:
d = descente_du_gradient_2(X, theta, V[0], nb_car, 50, alpha_X, alpha_theta)
ecart = ecart_moyen(V[1],np.dot(d[0],(d[1]).T),tableau_des_notes())
if ecart < mini[0]:
mini = [ecart, n_etape, nb_car, alpha_X, alpha_theta]
print(ecart," ",n_etape)
x += [n_etape]
y += [ecart]
X = d[1]
theta = d[0]
n_etape += 50
plt.plot(x,y)
plt.ylabel('Ecart moyen')
nom_x = "Nombre d'étapes // nb_car = " + str(nb_car) + " / alpha_X = " + str(alpha_X) + " / alpha_theta = " + str(alpha_theta)
plt.xlabel(nom_x)
nom_fichier = "nb_car-" + str(nb_car) + "_alpha_X-" + str(alpha_X) + "_alpha_theta-" + str(alpha_theta) + ".png"
plt.savefig(nom_fichier)
plt.show()
return mini
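# Quick illustrative shape check (the return order of descente_du_gradient_2 is an
# assumption inferred from the grid-search loop below): with theta of shape
# (n_users, nb_car) and X of shape (n_films, nb_car), np.dot(theta, X.T) yields one
# predicted rating per (user, film) pair, which is what ecart_moyen indexes into.
_theta_demo = np.ones((3, 2))   # 3 users, 2 latent features
_X_demo = np.ones((4, 2))       # 4 films, 2 latent features
assert np.dot(_theta_demo, _X_demo.T).shape == (3, 4)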
t1 = time.time()
V = notes_extraites_pour_validation(tableau_des_notes())
nb_car = 10
alpha_X = 0.001
alpha_theta = 0.0001
mini = [100, 100, nb_car, alpha_X, alpha_theta]
NB_CAR = []
NB_ETAPES = []
ECART = []
while nb_car <= 150:
X = np.random.random((len(tableau_des_notes()[0]), nb_car))
theta = np.random.random((len(tableau_des_notes()), nb_car))
while alpha_X >= 0.000125:
while alpha_theta >= 0.0000125:
mini = nb_etapes_optimal(mini, V, X, theta, nb_car, 2000, alpha_X, alpha_theta)
alpha_theta /= 2
print(mini)
alpha_theta = 0.0001
alpha_X /= 2
alpha_X = 0.001
nb_car += 10
print(mini)
print(time.time()-t1)
|
gpl-3.0
|
shangwuhencc/scikit-learn
|
examples/svm/plot_iris.py
|
225
|
3252
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
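# Hedged follow-up sketch (not part of the original example): LinearSVC minimizes the
# squared hinge loss by default, so passing loss='hinge' brings it closer to
# SVC(kernel='linear'), which minimizes the standard hinge loss; the OvR vs OvO
# multiclass handling still differs. max_iter is raised only to help convergence.
lin_svc_hinge = svm.LinearSVC(C=C, loss='hinge', max_iter=10000).fit(X, y)
print("max |coef| difference, squared hinge vs hinge:",
      np.abs(lin_svc.coef_ - lin_svc_hinge.coef_).max())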
|
bsd-3-clause
|
kelseyoo14/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/core/dtypes.py
|
9
|
5492
|
""" define extension dtypes """
import re
import numpy as np
from pandas import compat
class ExtensionDtype(object):
"""
A np.dtype duck-typed class, suitable for holding a custom dtype.
THIS IS NOT A REAL NUMPY DTYPE
"""
name = None
names = None
type = None
subdtype = None
kind = None
str = None
num = 100
shape = tuple()
itemsize = 8
base = None
isbuiltin = 0
isnative = 0
_metadata = []
def __unicode__(self):
return self.name
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
def __hash__(self):
raise NotImplementedError("sub-classes should implement an __hash__ method")
def __eq__(self, other):
raise NotImplementedError("sub-classes should implement an __eq__ method")
@classmethod
def is_dtype(cls, dtype):
""" Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """
if hasattr(dtype, 'dtype'):
dtype = dtype.dtype
if isinstance(dtype, cls):
return True
elif isinstance(dtype, np.dtype):
return False
try:
return cls.construct_from_string(dtype) is not None
except:
return False
class CategoricalDtypeType(type):
"""
the type of CategoricalDtype, this metaclass determines subclass ability
"""
pass
class CategoricalDtype(ExtensionDtype):
"""
A np.dtype duck-typed class, suitable for holding a custom categorical dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object
"""
name = 'category'
type = CategoricalDtypeType
kind = 'O'
str = '|O08'
base = np.dtype('O')
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
return isinstance(other, CategoricalDtype)
@classmethod
def construct_from_string(cls, string):
""" attempt to construct this type from a string, raise a TypeError if its not possible """
try:
if string == 'category':
return cls()
except:
pass
raise TypeError("cannot construct a CategoricalDtype")
class DatetimeTZDtypeType(type):
"""
the type of DatetimeTZDtype, this metaclass determines subclass ability
"""
pass
class DatetimeTZDtype(ExtensionDtype):
"""
A np.dtype duck-typed class, suitable for holding a custom datetime with tz dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.datetime64[ns]
"""
type = DatetimeTZDtypeType
kind = 'M'
str = '|M8[ns]'
num = 101
base = np.dtype('M8[ns]')
_metadata = ['unit','tz']
_match = re.compile("(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
def __init__(self, unit, tz=None):
"""
Parameters
----------
unit : string unit that this represents, currently must be 'ns'
tz : string tz that this represents
"""
if isinstance(unit, DatetimeTZDtype):
self.unit, self.tz = unit.unit, unit.tz
return
if tz is None:
# we were passed a string that we can construct
try:
m = self._match.search(unit)
if m is not None:
self.unit = m.groupdict()['unit']
self.tz = m.groupdict()['tz']
return
except:
raise ValueError("could not construct DatetimeTZDtype")
raise ValueError("DatetimeTZDtype constructor must have a tz supplied")
if unit != 'ns':
raise ValueError("DatetimeTZDtype only supports ns units")
self.unit = unit
self.tz = tz
@classmethod
def construct_from_string(cls, string):
""" attempt to construct this type from a string, raise a TypeError if its not possible """
try:
return cls(unit=string)
except ValueError:
raise TypeError("could not construct DatetimeTZDtype")
def __unicode__(self):
# format the tz
return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz)
@property
def name(self):
return str(self)
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
return isinstance(other, DatetimeTZDtype) and self.unit == other.unit and self.tz == other.tz
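# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not shipped with pandas): exercise the dtypes
# defined above by constructing them from their string forms and comparing by name.
if __name__ == '__main__':
    _cat = CategoricalDtype.construct_from_string('category')
    assert _cat == 'category' and _cat == CategoricalDtype()

    _dt = DatetimeTZDtype('datetime64[ns, US/Eastern]')
    assert (_dt.unit, _dt.tz) == ('ns', 'US/Eastern')
    assert _dt == 'datetime64[ns, US/Eastern]'  # __eq__ also accepts the name string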
|
artistic-2.0
|
MechCoder/scikit-learn
|
examples/plot_multioutput_face_completion.py
|
79
|
2986
|
"""
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
# Upper half of the faces
X_train = train[:, :(n_pixels + 1) // 2]
# Lower half of the faces
y_train = train[:, n_pixels // 2:]
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
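# With the 64x64 Olivetti faces, n_pixels = 4096, so each half above is 2048 pixels:
# the top 32 rows of the image are the input and the bottom 32 rows are the target.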
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
|
bsd-3-clause
|
bibsian/database-development
|
poplerGUI/ui_logic_covar.py
|
1
|
3178
|
#!/usr/bin/env python
from PyQt4 import QtGui
from poplerGUI import class_inputhandler as ini
from poplerGUI import class_modelviewpandas as views
from poplerGUI import ui_logic_preview as tprev
from poplerGUI.logiclayer import class_userfacade as face
from poplerGUI.logiclayer import class_helpers as hlp
from poplerGUI.logiclayer import class_dictionarydataframe as ddf
from Views import ui_dialog_covariate as covar
class CovarDialog(QtGui.QDialog, covar.Ui_Dialog):
'''
Class to handler the user input into the covariates
dialog box i.e. verify inputs and format data.
'''
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
# Attributes
self.covarlned = {'columns': None}
self.covarini = None
self._log = None
self.covardata = None
self.covarmodel = None
# Signals/slots
self.btnColumns.clicked.connect(self.submit_change)
self.btnPreview.clicked.connect(self.submit_change)
self.btnSaveClose.clicked.connect(self.submit_change)
self.btnCancel.clicked.connect(self.close)
# Pop up widgets
self.message = QtGui.QMessageBox
self.error = QtGui.QErrorMessage()
self.preview = tprev.TablePreview()
def submit_change(self):
sender = self.sender()
self.covarlned['columns'] = hlp.string_to_list(
self.lnedColumns.text()
)
self.covarini = ini.InputHandler(
name='covarinfo', tablename='covartable',
lnedentry=self.covarlned)
self.facade.input_register(self.covarini)
self.facade.create_log_record('covartable')
self._log = self.facade._tablelog['covartable']
if self.covarlned['columns'][0] == '':
print('in pass')
pass
else:
try:
self.facade._data[self.covarlned['columns']]
except Exception as e:
print(str(e))
self._log.debug(str(e))
self.error.showMessage(
'Column names not valid: Check spacing ' +
'and headers.')
raise ValueError('Column names are incorrect')
try:
self.covardata = ddf.DictionaryDataframe(
self.facade._data, self.covarlned['columns']
).convert_records()
except Exception as e:
print(str(e))
self._log.debug(str(e))
self.error.showMessage('Could not concatenate columns')
raise TypeError('Could not concatenate columns')
self.facade.push_tables['covariates'] = self.covardata
if sender is self.btnColumns:
self.message.about(self, 'Status', 'Columns recorded')
elif sender is self.btnPreview:
self.covarmodel = views.PandasTableModelEdit(
self.covardata)
self.preview.tabviewPreview.setModel(self.covarmodel)
self.preview.show()
elif sender is self.btnSaveClose:
hlp.write_column_to_log(
self.covarlned, self._log, 'covartable')
self.close()
|
mit
|
ghorn/rawesome
|
examples/pumping_mode/pumping_poweropt.py
|
2
|
13173
|
# Copyright 2012-2013 Greg Horn
#
# This file is part of rawesome.
#
# rawesome is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rawesome is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with rawesome. If not, see <http://www.gnu.org/licenses/>.
import casadi as C
import matplotlib.pyplot as plt
import pickle
import numpy
from numpy import pi
import rawe
import rawekite
from autogen.topumpingProto import toProto
from autogen.pumping_pb2 import Trajectory
numLoops=6
powerType = 'mechanical'
#powerType = 'electrical'
def constrainInvariants(ocp):
R_n2b = ocp.lookup('R_n2b',timestep=0)
rawekite.kiteutils.makeOrthonormal(ocp, R_n2b)
ocp.constrain(ocp.lookup('c',timestep=0), '==', 0, tag=('c(0)==0',None))
ocp.constrain(ocp.lookup('cdot',timestep=0), '==', 0, tag=('cdot(0)==0',None))
# constrain line angle
for k in range(0,nk):
for j in range(0,ocp.deg+1):
ocp.constrain(ocp.lookup('cos_line_angle',timestep=k,degIdx=j),'>=',C.cos(65*pi/180), tag=('line angle',k))
def constrainAirspeedAlphaBeta(ocp):
for k in range(0,nk):
ocp.constrain(ocp.lookup('airspeed',timestep=k), '>=', 10, tag=('airspeed',k))
for j in range(0,ocp.deg+1):
ocp.constrainBnds(ocp.lookup('alpha_deg',timestep=k,degIdx=j), (-4.5,8.5), tag=('alpha(deg)',k))
ocp.constrainBnds(ocp.lookup('beta_deg', timestep=k), (-9,9), tag=('beta(deg)',k))
def constrainTetherForce(ocp):
for k in range(ocp.nk):
# ocp.constrain( ocp.lookup('tether_tension',timestep=k,degIdx=1), '>=', 0, tag=('tether tension positive',k))
# ocp.constrain( ocp.lookup('tether_tension',timestep=k,degIdx=ocp.deg), '>=', 0, tag=('tether tension positive',k))
for j in range(1,ocp.deg+1):
ocp.constrain( ocp.lookup('tether_tension',timestep=k,degIdx=j), '>=', 0, tag=('tether tension positive',k))
def realMotorConstraints(ocp):
for k in range(nk):
# ocp.constrain( ocp.lookup('torque',timestep=k,degIdx=1), '<=', 150, tag=('motor torque',k))
# ocp.constrain( ocp.lookup('torque',timestep=k,degIdx=ocp.deg), '<=', 150, tag=('motor torque',k))
ocp.constrain( ocp.lookup('torque',timestep=k,degIdx=1), '<=', 78, tag=('motor torque',k))
ocp.constrain( ocp.lookup('torque',timestep=k,degIdx=ocp.deg), '<=', 78, tag=('motor torque',k))
ocp.constrain( ocp.lookup('rpm',timestep=k), '<=', 1500, tag=('rpm',k))
ocp.constrain( -1500, '<=', ocp.lookup('rpm',timestep=k), tag=('rpm',k))
def fakeMotorConstraints(ocp):
for k in range(nk):
ocp.constrain( ocp.lookup('torque',timestep=k,degIdx=1), '<=', 150, tag=('motor torque',k))
ocp.constrain( ocp.lookup('torque',timestep=k,degIdx=ocp.deg), '<=', 150, tag=('motor torque',k))
def setupOcp(dae,conf,nk,nicp,deg,collPoly):
def addCosts():
dddr = dae['dddr']
daileron = dae['daileron']
delevator = dae['delevator']
drudder = dae['drudder']
dflaps = dae['dflaps']
daileronSigma = 0.001
delevatorSigma = 0.8
dddrSigma = 10.0
drudderSigma = 0.1
dflapsSigma = 0.1
fudgeFactor = 1e-1
nkf = float(nk)
dae['daileronCost'] = fudgeFactor*daileron*daileron / (daileronSigma*daileronSigma*nkf)
dae['delevatorCost'] = fudgeFactor*delevator*delevator / (delevatorSigma*delevatorSigma*nkf)
dae['drudderCost'] = fudgeFactor*drudder*drudder / (drudderSigma*drudderSigma*nkf)
dae['dflapsCost'] = fudgeFactor*dflaps*dflaps / (dflapsSigma*dflapsSigma*nkf)
dae['dddrCost'] = fudgeFactor*dddr*dddr / (dddrSigma*dddrSigma*nkf)
addCosts()
ocp = rawe.collocation.Coll(dae, nk=nk, nicp=nicp, deg=deg, collPoly=collPoly)
ocp.setupCollocation(ocp.lookup('endTime'))
constrainInvariants(ocp)
constrainAirspeedAlphaBeta(ocp)
constrainTetherForce(ocp)
#realMotorConstraints(ocp)
fakeMotorConstraints(ocp)
# make it periodic
for name in [ "r_n2b_n_y","r_n2b_n_z",
"v_bn_n_y","v_bn_n_z",
"w_bn_b_x","w_bn_b_y","w_bn_b_z",
"r","dr","ddr",
'aileron','elevator','rudder','flaps'
]:
ocp.constrain(ocp.lookup(name,timestep=0),'==',ocp.lookup(name,timestep=-1), tag=('periodic diff state \"'+name+'"',None))
# periodic attitude
rawekite.kiteutils.periodicDcm(ocp)
# bounds
ocp.bound('aileron', (numpy.radians(-10),numpy.radians(10)))
ocp.bound('elevator',(numpy.radians(-10),numpy.radians(10)))
ocp.bound('rudder', (numpy.radians(-10),numpy.radians(10)))
ocp.bound('flaps', (numpy.radians(0),numpy.radians(0)))
# can't bound flaps==0 AND have periodic flaps at the same time
# bounding flaps (-1,1) at timestep 0 doesn't really free them, but satisfies LICQ
ocp.bound('flaps', (-1,1),timestep=0,quiet=True)
ocp.bound('daileron',(-2.0,2.0))
ocp.bound('delevator',(-2.0,2.0))
ocp.bound('drudder',(-2.0,2.0))
ocp.bound('dflaps',(-2.0,2.0))
ocp.bound('r_n2b_n_x',(-2000,2000))
ocp.bound('r_n2b_n_y',(-2000,2000))
if 'minAltitude' in conf:
ocp.bound('r_n2b_n_z',(-2000, -conf['minAltitude']))
else:
ocp.bound('r_n2b_n_z',(-2000, -0.05))
ocp.bound('r',(1,500))
ocp.bound('dr',(-100,100))
ocp.bound('ddr',(-500,500))
ocp.bound('dddr',(-50000,50000))
for e in ['e11','e21','e31','e12','e22','e32','e13','e23','e33']:
ocp.bound(e,(-1.1,1.1))
for d in ['v_bn_n_x','v_bn_n_y','v_bn_n_z']:
ocp.bound(d,(-200,200))
for w in ['w_bn_b_x',
'w_bn_b_y',
'w_bn_b_z']:
ocp.bound(w,(-6*pi,6*pi))
# ocp.bound('endTime',(0.5,12))
ocp.bound('endTime',(0.5,numLoops*7.5))
ocp.bound('w0',(10,10))
# boundary conditions
ocp.bound('r_n2b_n_y',(0,0),timestep=0,quiet=True)
# guesses
ocp.guess('endTime',5.4)
ocp.guess('w0',10)
# objective function
obj = 0
for k in range(nk):
# control regularization
obj += ocp.lookup('daileronCost',timestep=k)
obj += ocp.lookup('delevatorCost',timestep=k)
obj += ocp.lookup('drudderCost',timestep=k)
obj += ocp.lookup('dflapsCost',timestep=k)
obj += ocp.lookup('dddrCost',timestep=k)
ocp.setQuadratureDdt('mechanical_energy', 'mechanical_winch_power')
ocp.setQuadratureDdt('electrical_energy', 'electrical_winch_power')
ocp.setObjective( obj + ocp.lookup(powerType+'_energy',timestep=-1)/ocp.lookup('endTime') )
return ocp
if __name__=='__main__':
print "reading config..."
# from carousel_conf import conf
#from highwind_carousel_conf import conf
from rawe.models.betty_conf import makeConf
# nk = 30*numLoops
nk = 512
# nk = 70
print "creating model..."
conf = makeConf()
dae = rawe.models.crosswind(conf)
dae.addP('endTime')
conf['minAltitude'] = 0
print "setting up ocp..."
nicp = 1
deg = 4
collPoly='RADAU'
#collPoly='LEGENDRE'
ocp = setupOcp(dae,conf,nk,nicp,deg,collPoly)
# spawn telemetry thread
callback = rawe.telemetry.startTelemetry(
ocp, callbacks=[
(rawe.telemetry.trajectoryCallback(toProto, Trajectory, showAllPoints=False), 'pumping trajectory')
])
# solver
ipoptOptions = [("linear_solver","ma97"),
("expand",True),
("max_iter",2000),
("tol",1e-8)]
worhpOptions = [("Max_Iter",5000),
("expand",True),
#("MaxIter",5000),
("Timeout", 1e6),
("UserHM", True),
("ScaleConIter",True),
("ScaledFD",True),
("ScaledKKT",True),
("ScaledObj",True),
("ScaledQP",True)
]
print "setting up solver..."
solverOptions = ipoptOptions
# solverOptions = worhpOptions
ocp.setupSolver( solverOpts=solverOptions,
callback=callback )
# ocp.interpolateInitialGuess("data/crosswind_homotopy.dat",force=True,quiet=True,numLoops=numLoops)
ocp.interpolateInitialGuess("data/crosswind_opt_mechanical_6_loops-backup.dat",force=True,quiet=True)
# ocp.interpolateInitialGuess("data/crosswind_opt_electrical_1_loops.dat",force=True,quiet=True,numLoops=numLoops)
# ocp.interpolateInitialGuess('data/crosswind_opt_'+powerType+'_1_loops.dat',
# force=True,quiet=True,numLoops=numLoops)
# ocp.interpolateInitialGuess("data/crosswind_opt.dat",force=True,quiet=True,numLoops=numLoops)
# ocp.interpolateInitialGuess("data/crosswind_opt_electrical_2_loops.dat",force=True,quiet=True,numLoops=1)
traj = ocp.solve()
print "num loops: "+str(numLoops)
print "optimizing "+powerType
print "optimal mechanical power: "+str(traj.lookup('mechanical_energy',-1)/traj.lookup('endTime'))
print "optimal electrical power: "+str(traj.lookup('electrical_energy',-1)/traj.lookup('endTime'))
print "endTime: "+str(traj.lookup('endTime'))
traj.saveMat('data/crosswind_opt_'+powerType+'_'+str(numLoops)+'_loops.mat',
dataname='crosswind_opt_'+powerType+'_'+str(numLoops)+'_loops')
traj.save('data/crosswind_opt_'+powerType+'_'+str(numLoops)+'_loops.dat')
# Plot the results
def plotResults():
# traj.subplot(['aero_fx','aero_fy','aero_fz'])
# traj.subplot(['aero_mx','aero_my','aero_mz'])
# traj.subplot(['r_n2b_n_x','r_n2b_n_y','r_n2b_n_z'])
# traj.subplot(['v_bn_n_x','v_bn_n_y','v_bn_n_z'])
traj.subplot(['aileron','elevator','rudder','flaps'],title='control surfaces')
traj.subplot([['dddr'],['daileron','delevator','drudder','dflaps']],title='control surfaces')
# traj.subplot(['wind_at_altitude','dr'],title='')
# traj.subplot(['c','cdot','cddot'],title="invariants")
traj.plot('airspeed',title='airspeed')
traj.subplot([['alpha_deg'],['beta_deg']])
traj.subplot([['cL'],['cD','cD_tether'],['L_over_D','L_over_D_with_tether']],title='')
# traj.subplot([['winch_power'], ['tether_tension'],['accel_g','accel_without_gravity_g']])
# traj.subplot([['rpm'],['dr']])
traj.subplot([['tether_tension'],['torque']])
# traj.plot(['mechanical_winch_power', 'electrical_winch_power'])
# traj.plot('r')
# traj.subplot([['ddx','ddy','ddz'],['accel','accel without gravity']])
# traj.plot(["loyds_limit","loyds_limit_exact","neg_winch_power"])
# traj.plot(["loyd's limit","-(winch power)"],title='')
traj.subplot(['daileronCost','delevatorCost','dddrCost'])
traj.subplot(['r','dr','ddr','dddr'])
# traj.subplot(['w_bn_b_x','w_bn_b_y','w_bn_b_z'])
# traj.subplot(['e11','e12','e13','e21','e22','e23','e31','e32','e33'])
# traj.plot('line_angle_deg')
# traj.plot('quadrature_energy')
# traj.subplot(['energy','quadrature_energy'])
# traj.plot(['energy','quadrature_energy'])
# traj.plot('nu')
plt.show()
# plotResults()
traj.subplot(['r','dr'])
traj.subplot(['rpm','torque'])
traj.subplot(['airspeed'])
plt.show()
def plotPaper():
plt.figure()
plt.subplot(221)
traj._plot('cL','',showLegend=False)
plt.legend(['$C_L$'])
plt.xlabel('')
plt.ylim([0.1,1.1])
plt.xlim([0,traj.tgrid[-1,0,0]])
plt.subplot(223)
traj._plot('L_over_D_with_tether','',showLegend=False)
plt.legend(['$L/D$'])
plt.ylim([2,15])
plt.xlim([0,traj.tgrid[-1,0,0]])
plt.subplot(222)
traj._plot('wind_at_altitude','',showLegend=False)
plt.xlabel('')
plt.ylabel('[m/s]')
plt.ylim([5.5,10])
plt.legend(['wind at altitude'])
plt.xlim([0,traj.tgrid[-1,0,0]])
plt.subplot(224)
traj._plot('dr','',showLegend=False)
plt.ylabel('[m/s]')
plt.ylim([-15,12])
plt.legend(['$\dot{l}$'])
plt.xlim([0,traj.tgrid[-1,0,0]])
# traj.subplot(['cL','L/D','dr'],title='')
# traj.plot(["loyd's limit","loyd's limit (exact)","-(winch power)"])
# traj.plot(["loyd's limit","-(winch power)"],title='')
plt.figure()
traj._plot("loyds_limit",'',showLegend=False,style=':')
traj._plot("neg_winch_power",'',showLegend=False)
plt.legend(["Loyd's limit","winch power"])
plt.ylabel('power [W]')
plt.ylim([-600,1100])
plt.grid()
plt.xlim([0,traj.tgrid[-1,0,0]])
plt.show()
#plotPaper()
|
lgpl-3.0
|
tribhuvanesh/vpa
|
vispr/resize_images.py
|
1
|
3489
|
#!/usr/bin/python
"""Resize images in $ROOT/images directory
Many images are of high-resolution - this takes time to read in the input pipeline during training. So, resize the
images in $ROOT/images directory maintaining aspect ratio.
"""
import json
import time
import pickle
import sys
import csv
import argparse
import os
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.misc import imread, imresize
from vispr import DS_ROOT
__author__ = "Tribhuvanesh Orekondy"
__maintainer__ = "Tribhuvanesh Orekondy"
__email__ = "[email protected]"
__status__ = "Development"
def resize_min_side(pil_img, min_len):
"""
    Resize image such that the shortest side length = min_len pixels
    :param pil_img: input PIL image
    :param min_len: target length of the shortest side, in pixels
    :return: resized PIL image with the aspect ratio preserved
"""
# What's the min side?
w, h = pil_img.size
if w < h:
new_w = min_len
new_h = int(np.round(h * (new_w / float(w)))) # Scale height to same aspect ratio
else:
new_h = min_len
        new_w = int(np.round(w * (new_h / float(h))))  # Scale width to preserve aspect ratio
return pil_img.resize((new_w, new_h))
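# Cheap illustrative self-check of resize_min_side (runs at import and uses only PIL,
# which is already imported above): a 400x200 image resized with min_len=100 keeps its
# 2:1 aspect ratio and becomes 200x100.
assert resize_min_side(Image.new('RGB', (400, 200)), 100).size == (200, 100)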
def resize_img_in_dir(input_dir, output_dir, min_length, skip_existing=True):
print 'Input directory: ', input_dir
print 'Output directory: ', output_dir
print
num_files = len(os.listdir(input_dir))
print 'Resizing: {} images'.format(num_files)
num_existing_files = len(os.listdir(output_dir))
print 'Found: {} images already exist'.format(num_existing_files)
go_ahead = raw_input('Continue? [y/n]: ')
if go_ahead == 'y':
pass
else:
        print 'Exiting...'
return
for idx, org_img_fname in enumerate(os.listdir(input_dir)):
resized_img_path = osp.join(output_dir, org_img_fname)
if osp.exists(resized_img_path) and skip_existing:
# Skip if it already exists
continue
org_img_path = osp.join(input_dir, org_img_fname)
org_img = Image.open(org_img_path)
resized_img = resize_min_side(org_img, min_len=min_length)
try:
resized_img.save(resized_img_path)
except IOError:
resized_img.convert('RGB').save(resized_img_path)
sys.stdout.write(
"Processing %d/%d (%.2f%% done) \r" % (idx, num_files, (idx + 1) * 100.0 / num_files))
sys.stdout.flush()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input_dir", type=str,
help="Directory containing images")
parser.add_argument("out_dir", type=str,
help="Directory to write resized output images")
parser.add_argument("-m", "--min_length", type=int, default=250,
help="Resize smallest dimension to this size")
parser.add_argument("-s", "--skip_existing", action='store_true', default=False,
help="Skip if the resized file already exists")
args = parser.parse_args()
params = vars(args)
# print 'Input parameters: '
# print json.dumps(params, indent=2)
input_dir = params['input_dir']
min_length = params['min_length']
out_dir = params['out_dir']
if not osp.exists(out_dir):
print 'Path {} does not exist. Creating it...'.format(out_dir)
os.makedirs(out_dir)
resize_img_in_dir(input_dir, out_dir, min_length=min_length, skip_existing=params['skip_existing'])
if __name__ == '__main__':
main()
|
apache-2.0
|
mehdidc/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
3
|
6324
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
"""BernoulliRBM should work on small sparse matrices."""
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
from the same input
"""
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
the same input even when the input is sparse, and test against non-sparse
"""
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
"""Check if we don't get NaNs sampling the full digits dataset.
Also check that sampling again will yield different results."""
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
"""Test score_samples (pseudo-likelihood) method."""
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
"""
Make sure RBM works with sparse input when verbose=True
"""
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|
RandolphVI/CNN-Text-Classification
|
CNN/test_cnn.py
|
1
|
6208
|
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
import numpy as np
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
args = parser.parameter_parser()
MODEL = dh.get_model_name()
logger = dh.logger_fn("tflog", "logs/Test-{0}.log".format(time.asctime()))
CPT_DIR = 'runs/' + MODEL + '/checkpoints/'
BEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/'
SAVE_DIR = 'output/' + MODEL
def create_input_data(data: dict):
return zip(data['f_pad_seqs'], data['b_pad_seqs'], data['onehot_labels'])
def test_cnn():
"""Test CNN model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load word2vec model
word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
# Load data
logger.info("Loading data...")
logger.info("Data processing...")
test_data = dh.load_data_and_labels(args, args.test_file, word2idx)
# Load cnn model
OPTION = dh._option(pattern=1)
if OPTION == 'B':
logger.info("Loading best model...")
checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True)
else:
logger.info("Loading latest model...")
checkpoint_file = tf.train.latest_checkpoint(CPT_DIR)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x_front = graph.get_operation_by_name("input_x_front").outputs[0]
input_x_behind = graph.get_operation_by_name("input_x_behind").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
is_training = graph.get_operation_by_name("is_training").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/topKPreds").outputs[0]
predictions = graph.get_operation_by_name("output/topKPreds").outputs[1]
loss = graph.get_operation_by_name("loss/loss").outputs[0]
# Split the output nodes name by '|' if you have several output nodes
output_node_names = "output/topKPreds"
# Save the .pb model file
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
output_node_names.split("|"))
tf.train.write_graph(output_graph_def, "graph", "graph-cnn-{0}.pb".format(MODEL), as_text=False)
# Generate batches for one epoch
batches_test = dh.batch_iter(list(create_input_data(test_data)), args.batch_size, 1, shuffle=False)
# Collect the predictions here
test_counter, test_loss = 0, 0.0
true_labels = []
predicted_labels = []
predicted_scores = []
for batch_test in batches_test:
x_f, x_b, y_onehot = zip(*batch_test)
feed_dict = {
input_x_front: x_f,
input_x_behind: x_b,
input_y: y_onehot,
dropout_keep_prob: 1.0,
is_training: False
}
batch_predicted_scores, batch_predicted_labels, batch_loss \
= sess.run([scores, predictions, loss], feed_dict)
for i in y_onehot:
true_labels.append(np.argmax(i))
for j in batch_predicted_scores:
predicted_scores.append(j[0])
for k in batch_predicted_labels:
predicted_labels.append(k[0])
test_loss = test_loss + batch_loss
test_counter = test_counter + 1
test_loss = float(test_loss / test_counter)
# Calculate Precision & Recall & F1
test_acc = accuracy_score(y_true=np.array(true_labels), y_pred=np.array(predicted_labels))
test_pre = precision_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
test_rec = recall_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
test_F1 = f1_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
# Calculate the average AUC
test_auc = roc_auc_score(y_true=np.array(true_labels),
y_score=np.array(predicted_scores), average='micro')
logger.info("All Test Dataset: Loss {0:g} | Acc {1:g} | Precision {2:g} | "
"Recall {3:g} | F1 {4:g} | AUC {5:g}"
.format(test_loss, test_acc, test_pre, test_rec, test_F1, test_auc))
# Save the prediction result
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
dh.create_prediction_file(output_file=SAVE_DIR + "/predictions.json", front_data_id=test_data['f_id'],
behind_data_id=test_data['b_id'], true_labels=true_labels,
predict_labels=predicted_labels, predict_scores=predicted_scores)
logger.info("All Done.")
if __name__ == '__main__':
test_cnn()
|
apache-2.0
|
ivanamihalek/tcga
|
tcga/01_somatic_mutations/027_patient_freqs.py
|
1
|
9664
|
#!/usr/bin/python -u
#
# This source code is part of tcga, a TCGA processing pipeline, written by Ivana Mihalek.
# Copyright (C) 2014-2016 Ivana Mihalek.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact: [email protected]
#
from time import time
from tcga_utils.utils import *
from tcga_utils.ensembl import *
# note this has to come in this particular order
# if we are to plot to a file without opening the matplotlib window
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
drop_silent = True
special = None
verbose = False
#########################################
def rank_message (gene_name, freq_gene):
rank_msg = ""
if gene_name in freq_gene.keys():
less_mutated = len( [y for y in freq_gene.values() if y<freq_gene[gene_name]])
more_mutated = len( [y for y in freq_gene.values() if y>freq_gene[gene_name]])
rank_msg = "%7s mutated in %.3f%% patients (rank: %d-%d) " % \
(gene_name, freq_gene[gene_name], more_mutated, len(freq_gene)-less_mutated)
middle_range = float(more_mutated + len(freq_gene)-less_mutated)/2.0
else:
rank_msg = "%7s rank: %d (no patients)" % (gene_name, len(freq_gene))
middle_range = -1
return [rank_msg, middle_range]
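# Illustrative call with made-up frequencies (not TCGA data), showing the format of the
# returned rank message:
# rank_message('TP53', {'TP53': 30.0, 'APC': 10.0, 'TTN': 15.0})
# -> ["   TP53 mutated in 30.000% patients (rank: 0-1) ", 0.5]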
#########################################
def live_plot ( title, freq_gene, sorted_genes, filename):
fig, ax1 = plt.subplots(1, 1)
ax1.set_title (title, fontsize=24)
#ax1.set_xscale("log", nonposx='clip')
ax1.set_xlabel('genes, listed by their rank', fontsize = 20)
ax1.set_ylabel('% of patients', fontsize = 24)
if special:
[rank_msg, middle_range] = rank_message(special, freq_gene)
#bg_color = (0, 102./255, 204./255) # this is blue. I believe
bg_color = (1, 1, 1)
#ax1.set_axis_bgcolor(bg_color)
x = range(1,len(sorted_genes)+1)
y = [freq_gene[gene] for gene in sorted_genes]
# fudge x and y to get steps
if True:
xfudge = []
yfudge = []
xfudge.append(x[0])
yfudge.append(y[0])
for i in range(1,len(y)):
xfudge.append(x[i])
yfudge.append(y[i-1])
xfudge.append(x[i])
yfudge.append(y[i])
x = xfudge
y = yfudge
ylim = min(max(y),2)
xlim = len(sorted_genes)
if special:
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.8)
#ax1.text(xlim*0.9, ylim*0.9, rank_msg, ha="right", va="top", size=14, bbox=bbox_props)
if middle_range < 0: middle_range = xlim
ax1.annotate ('', xy=(middle_range, 0), # use the axes data coordinate system
xytext = (middle_range, ylim/2), # fraction, fraction
arrowprops = dict(facecolor='red', shrink=0.05),
horizontalalignment = 'left',
verticalalignment = 'bottom')
#ax1.fill_between(x, y, interpolate=True, color=(255./255,153./255,51./255)) # orange
ax1.fill_between(x, y, interpolate=True, color=(128./255,128./255,128./255)) # gray
plt.ylim(0,ylim)
plt.xlim(0,xlim)
fig.tight_layout()
if filename:
plt.savefig(filename)
else:
plt.show()
#########################################
def main():
db = connect_to_mysql()
cursor = db.cursor()
db_names = ["ACC", "BLCA", "BRCA", "CESC", "CHOL", "COAD","ESCA", "GBM", "HNSC", "KICH" ,"KIRC",
"KIRP","LAML", "LGG", "LIHC", "LUAD", "LUSC", "OV", "PAAD", "PCPG", "PRAD", "REA",
"SARC", "SKCM", "STAD", "TGCT", "THCA", "THYM", "UCEC", "UCS", "UVM"]
#db_names = ["COAD"]
tables = ["somatic_mutations", "metastatic_mutations"]
full_name = read_cancer_names()
pancan_freq = {}
pancan_silent = {}
pancan_non_silent = {}
grand_total_patients = 0
for db_name in db_names:
print "######################################"
print db_name
switch_to_db (cursor, db_name)
############################
number_muts = 0
for table in tables:
qry = "select count(1) from %s " % table
rows = search_db(cursor, qry)
number_muts += int(rows[0][0])
print db_name, "total number of mutations: ", number_muts
############################
genes = set([])
for table in tables:
qry = "select distinct(hugo_symbol) from %s " % table
rows = search_db(cursor, qry)
if not rows: continue
genes |= set([row[0] for row in rows])
genes = list(genes)
print "number of affected genes:", len(genes)
############################
total_patients = 0
for table in tables:
qry = "select distinct(sample_barcode_short) from %s " % table
rows = search_db(cursor, qry)
if not rows: continue
patients = [row[0] for row in rows]
total_patients += len(patients)
print "\t", total_patients
grand_total_patients += total_patients
############################
print "frequencies reported per gene"
freq_gene = {}
special_dropped = False
silent_ct = 0
non_silent_ct = 1
prev_time = time()
ct = 0
#genes = ['RPL5', 'RPL11', 'TP53', 'APC']
for gene in genes:
ct += 1
if not ct%1000:
print "%4d out of %4d, time for the last 1000: %8.3f s" % (ct, len(genes), time()-prev_time)
prev_time = time()
#break
[silent_ct, non_silent_ct] = silent_proportion(cursor, gene)
#print gene, silent_ct, non_silent_ct
if drop_silent and (non_silent_ct==0 or float(silent_ct)/non_silent_ct>0.15):
#if non_silent_ct==0:
# print gene, 'non_silent_ct == 0', ' dropping'
#else:
# print gene, " %6.4f " % (float(silent_ct)/non_silent_ct), ' dropping'
continue
no_patients = 0
for table in tables:
qry = "select sample_barcode_short, count(sample_barcode_short) from %s " % table
qry += "where hugo_symbol='%s' " % gene
qry += "and variant_classification!='silent' and variant_classification!='RNA' "
qry += "group by sample_barcode_short"
rows = search_db(cursor, qry)
if rows:
no_patients += len(rows)
if no_patients==0: continue
if not pancan_freq.has_key(gene):
pancan_freq[gene] = 0
pancan_silent[gene] = 0
pancan_non_silent[gene] = 0
pancan_freq[gene] += no_patients
pancan_silent[gene] += silent_ct
pancan_non_silent[gene] += non_silent_ct
freq_gene[gene] = float(no_patients)/total_patients*100
#print gene, silent_ct, non_silent_ct
#print gene, pancan_silent[gene], pancan_non_silent[gene]
#print '-'*20
if special and special_dropped: continue
###################################
# in individual tumor types:
if False:
filename = db_name+"_somatic_freq.png"
title = full_name[db_name]
sorted_genes = sorted(freq_gene.keys(), key= lambda x: -freq_gene[x])
live_plot (title, freq_gene, sorted_genes, filename)
for gene in pancan_freq.keys():
pancan_freq[gene] /= float(grand_total_patients)
pancan_freq[gene] *= 100
sorted_genes = sorted(pancan_freq.keys(), key= lambda x: -pancan_freq[x])
if verbose:
for gene in sorted_genes:
print " %10s %6d%% %6.4f %4d %4d " % (gene, round(pancan_freq[gene]),
float(pancan_silent[gene])/pancan_non_silent[gene],
pancan_silent[gene], pancan_non_silent[gene])
# special interest:
if special:
gene = special
print " %10s %6d%% %6.4f %3d %3d %5d " % (gene, round(pancan_freq[gene]),
float(pancan_silent[gene])/pancan_non_silent[gene],
pancan_silent[gene], pancan_non_silent[gene],
sorted_genes.index(gene))
filename = "pancan_somatic_freq.filtered_for_silent_proportion.png"
live_plot ("Pan-cancer statistics", pancan_freq, sorted_genes, filename)
for gene in ['TP53', 'APC', 'RPL5', 'RPL11', 'RPL6', 'RPL23', ]:
msg, range = rank_message (gene, pancan_freq)
print msg
for gene in sorted_genes[:10]:
msg, range = rank_message (gene, pancan_freq)
print msg
cursor.close()
db.close()
#########################################
if __name__ == '__main__':
main()
|
gpl-3.0
|
ogirou/ODSTA
|
ODSTAG.py
|
1
|
19373
|
#! /usr/bin/python2.7
# coding=utf-8
########################################################
import matplotlib.pyplot as plt
import numpy as np
from numpy import *
import pylab
import os
import interpFunctions # contains the der_auto function, used to automatically fit
from interpFunctions import * # the zero-slope line of the derivative during the IARF
########################################################
print ''
print '################################################################'
print '################################################################'
print '### ###'
print '### Outil d analyse des essais en bout de tige pour les ###'
print '### fluides compressibles et incompressibles. Methodes ###'
print '### tirees de Ezekwe - 2010 - Petroleum reservoir engin- ###'
print '### eering practice - Prentice Hall ###'
print '### ###'
print '################################################################'
print '################################################################'
print ''
print 'Récuperation des données de pression et temps'
print''
#
# Tests logiques pour entrer les unites de pression et temps
#
# Pression
#
q_p=int(raw_input('Unité de pression? psi = 0, kPa = 1, Mpa = 2\n'))
if q_p==2:
unit_p='MPa'
print 'Conversion MPa en psi'
print ''
p_MPa=loadtxt("pression.txt")
p=p_MPa/0.00689475729
elif q_p==1:
unit_p='kPa'
print 'Conversion kPa en psi'
print ''
p_kPa=loadtxt("pression.txt")
p=p_kPa/6.89475729
else:
unit_p='psi'
p = loadtxt("pression.txt")
print 'Pressions',p,'psi'
print ''
#
# Temps de test
#
q_t=int(raw_input('Unité de temps? min = 0, h = 1'))
if q_t==0:
unit_t='min'
t = loadtxt("temps.txt")
else:
unit_t='heure'
print 'Conversion h en min'
print ''
t=60*loadtxt("temps.txt")
if t[0]==0:
t=t[1:] # on veut exclure t=0 car
p=p[1:] # un calcul de log de temps est effectué juste après
print 'Temps',t,'min'
print ''
#
# Temps de production
#
tp=int(raw_input('Temps de production de fluides? (en min)\n'))
print ''
#############################################################
### ###
### Calcul de la dérivée de la pression ###
### ###
#############################################################
te,lt=agarwal(t,tp)
pf, dp, dt =deriv(lt,te,p)
#
# Plot loglog
#
popp='Pression'
plot_bourd(te,pf,dt,dp,popp)
hap='n'
while hap=='n':
pf, dp, dt = deriv(lt,te,p)
x_IARF, reg_dp, ind_IARF_deb, ind_IARF_fin, y_dp_0 = der_auto(dt,dp)
### Plotty time!
plot_bourd_s(te,pf,dt,dp,x_IARF,reg_dp,popp)
hap=raw_input('Alors heureux? (n = retrace Bourdet plot)')
print''
# y_dp_0=int(raw_input('Valeur de dp relevee avec pente de 0?'))
np.savetxt('dt_dp.csv', (dt,dp), delimiter=',')
###################################################################
### ###
### Récupération des données de débit et détermination ###
### de la méthode d'interprétation ###
### ###
###################################################################
var_q=int(raw_input('Le débit est il variable? Non = 0, Oui un peu (test court) = 1 et Beaucoup (test long) = reste'))
print ''
#
# 3 cases: constant q = classical Horner plot, variable q = apparent Horner time,
# highly variable q = deconvolution still to be implemented (not done)
#
if var_q==0: # Méthode: Horner plot classique (cas 1)
metho_q=0
q=int(raw_input('Valeur du débit en Mcf/d')) # Il suffit d'entrer le débit
print ''
tq=tp
tH=(tp+t)/t # Fonction de temps de Horner
tpH='null'
elif var_q==1: # Méthode: Horner plot avec un temps apparent de Horner qui dépend de q
# (cas 2)
metho_q=1
q_q=int(raw_input('Unité de débit? Mcf/d = 0, m3/s = 1, m3/j = 2')) # conversion du temps
print ''
if q_q==0:
unit_p='Mcf/d'
q=loadtxt("debits.txt")
    elif q_q==1:
unit_p='m^3/s'
print 'Conversion m^3/s en Mcf/d'
print ''
q_m3s=loadtxt("debits.txt")
q=3051187.20*q_m3s
    elif q_q==2:
print 'Conversion m^3/j en Mcf/d\n'
q_m3j=loadtxt("debits.txt")
q=35.3146667*q_m3j
else:
print 'Nouvelle conversion à intégrer au code'
print ''
    unit_tq=int(raw_input('Unité de temps du débit de production? Jour = 0, heure = 1, minute = 2'))
if unit_tq==1:
print 'Conversion h en min'
print ''
tq=60*loadtxt("tflow.txt")
    elif unit_tq==0:
print 'Conversion j en min'
print ''
tq=1440*loadtxt("tflow.txt")
else:
tq=loadtxt("tflow.txt")
#
    # Compute the apparent Horner producing time
#
Gp=sum(q*tq)
qglast=q[-1]
tpH=24*Gp/qglast
tH=(tpH+t)/t
print 'tpH', tpH, 'min'
print ''
else:
print 'Déconvolution à intégrer au code, respire, va boire un café, va manger un chien chaud'
print 'Temps',tq,'min'
print ''
print 'Débits',q,'Mcf/d', tq, 'min'
print ''
np.savetxt('t_p_te_pf_tH.csv', (t,p,te,pf,tH), delimiter=',')
np.savetxt('tq_q.csv', (tq,q), delimiter=',')
##################################################
###                                            ###
###               Temperature                  ###
###                                            ###
##################################################
print 'Computing the gas properties'
print ''
print 'In the absence of data on the nature of the gas, the PVT properties of pure methane are used'
print ''
T_FU=float(raw_input('Temperature in Fahrenheit (0 if unknown)?'))
print ''
#
# Logical test to estimate the temperature if it is unknown
#
if T_FU==0:
    print 'Guesstimating the fluid temperature at the tester depth'
    print ''
    z_ft=int(raw_input('Tester depth in feet?'))
    print ''
    temp_moy_an=5
    grad_G=0.02  # average of Lefebvre - 1982 and Tran Ngoc et al - 2011
    T_USI=temp_moy_an+grad_G*(z_ft*0.3048)
    T_FU=T_USI*9/5+32
    print 'T =',T_USI,'deg C or',T_FU,'deg F'
    print ''
T_USI=(T_FU-32)*5/9
###################################################################
###                                                             ###
###                Computing the gas properties                 ###
###                                                             ###
###################################################################
print "Computing the gas properties, mind the gases chosen"
print ''
R=10.732  # ideal gas constant in field units
#
# Computing the pseudo-reduced T and p
#
# Methane
#M_g=16.042   # molar mass of methane
#T_pc=-116.66 # pseudo-critical T of methane
#p_pc=667.0   # pseudo-critical p of methane
# Ethane
#M_g=30.069
#T_pc=89.92
#p_pc=706.6
mix=float(raw_input('Exact knowledge of the gas mixture? Yes = 1, No = 0'))
print ''
if mix==1:
    #
    # Computing p_pr and T_pr with the method of Kay - 1936
    #
    print 'Kay - 1936 method, valid when the C7+ fraction is small.\n'
    # columns: molar mass, pseudo-critical p (psia), pseudo-critical T (deg F)
    table=array([[16.042,667.0,-116.66],
                 [30.069,706.6,89.92],
                 [44.096,615.5,205.92],
                 [58.122,527.9,274.41],
                 [58.122,550.9,305.55],
                 [72.149,490.4,369],
                 [72.149,488.8,385.8],
                 [86.175,436.9,453.8],
                 [100.202,396.8,512.9],
                 [114.229,360.7,564.2],
                 [128.255,330.7,610.8],
                 [142.282,304.6,652.2],
                 [28.01,506.7,-220.63],
                 [44.01,1070.0,87.76],
                 [34.082,1306.5,212.81],
                 [28.959,551.9,-220.97],
                 [2.0159,190.7,-399.9],
                 [31.9988,731.4,-181.43],
                 [28.0135,492.5,-232.53],
                 [18.0153,3200.1,705.1]])
    fichier = open("compo.csv", "r")
    compo=array([])
    for n in arange(1,21,1):
        l=fichier.readline().rstrip('\n\r').split(",")
        compo=np.append(compo,float(l[1]))
    # Kay's mixing rule: molar average of the pseudo-critical properties
    p_pc=sum(table[:,1]*compo)
    T_pc=sum(table[:,2]*compo)
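    #
    # Added sanity check (an assumption, not part of the original workflow):
    # the mole fractions read from compo.csv should sum to roughly 1
    #
    if abs(sum(compo)-1.0)>0.05:
        print 'Warning: the composition fractions sum to', sum(compo)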
else:
    #
    # Computing p_pr and T_pr with the method of Sutton - 2005
    #
    gamma_g=float(raw_input('Gas gravity? (-)'))
    print ''
    M_g=gamma_g*28.9586
    #
    # 4.45 and 4.46 - Ezekwe (for gas condensates)
    #
    # A version also exists for associated hydrocarbons but it does not work
    # with the low gravities observed here
    #
    p_pc=744-125.4*gamma_g+5.9*gamma_g**2
    T_pc=164.3+357.7*gamma_g-67.7*gamma_g**2  # in deg R
#
# 4.17 and 4.18 - Ezekwe
#
p_pr=p/p_pc
T_pr=(T_FU+460)/(T_pc)
print 'Pseudo-reduced temperature',T_pr,'(-)'
print ''
print 'Pseudo-reduced pressure', p_pr,'(-) (',len(p_pr),')'
print ''
#
# Computing z with the Newton-Raphson method taken from Sutton?
#
# See Ezekwe 4.????
#
A1=.3265
A2=-1.07
A3=-.5339
A4=.01569
A5=-.05165
A6=.5475
A7=-.7361
A8=.1844
A9=.1056
A10=.6134
A11=.721
c1=A1+A2/T_pr+A3/T_pr**3+A4/T_pr**4+A5/T_pr**5
c2=A6+A7/T_pr+A8/T_pr**2
c3=A9*(A7/T_pr+A8/T_pr**2)
z=1
for n in arange (1,50,1):
    rho_r=.27*p_pr/(z*T_pr)
    c4=A10*(1+A11*rho_r**2)*(rho_r**2/T_pr**3)*exp(-A11*rho_r**2)
    z=z-(z-(1+c1*rho_r+c2*rho_r**2-c3*rho_r**5+c4))/(1+c1*rho_r/z+2*c2*rho_r**2/z-5*c3*rho_r**5/z+2*A10*rho_r**2/(z*T_pr**3)*(1+A11*rho_r**2-(A11*rho_r**2)**2)*exp(-A11*rho_r**2))
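#
# Added convergence check (an assumption, not part of the original script):
# recompute the residual of the z-factor equation after the fixed 50 iterations;
# 'z_res' is an illustrative name introduced here
#
rho_r=.27*p_pr/(z*T_pr)
c4=A10*(1+A11*rho_r**2)*(rho_r**2/T_pr**3)*exp(-A11*rho_r**2)
z_res=abs(z-(1+c1*rho_r+c2*rho_r**2-c3*rho_r**5+c4))
if np.max(z_res)>1e-6:
    print 'Warning: the z-factor iteration may not have converged, max residual =', np.max(z_res)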
print 'z values',z,'(-)','(',len(z),')'
print ''
#
# Computing the remaining variables
#
# 4.80-84 - Ezekwe from Lee et al - 1966
#
# Density
#
rau_g=M_g*p/z/R/(T_FU+460)  # result in lbm/ft^3
rau_gUSI=rau_g*0.01601846
print 'Gas density', rau_gUSI, 'g/cm3','(',len(rau_gUSI),')'
print ''
#
# Viscosity
#
K=((9.379+0.01607*M_g)*(T_FU+460)**1.5)/(209.2+19.26*M_g+(T_FU+460));
X=3.448+986.4/(T_FU+460)+0.01009*M_g;
Y=2.447-0.2224*X;
mu_g=0.0001*K*exp(X*rau_gUSI**Y)
print 'Gas viscosity', mu_g, 'cp','(',len(mu_g),')'
print ''
#
# Formation Volume Factor
#
# 4.75 - Ezekwe
#
B_g=0.02819*z*(T_FU+459.67)/p
print 'FVF', B_g,'(-)','(',len(B_g),')'
print ''
#
# c_t could also be computed but it is not used here
#
###################################################################
###                                                             ###
###               Computing the pseudo-pressure                 ###
###                                                             ###
###################################################################
#
# Pseudo-pressure following Al-Hussainy et al - 1966
#
# The integral of the function 2p/(mu_g*z) is computed with a rectangle-type rule
#
# (the integral is the sum of the areas of the elementary slices under the curve)
#
# See Ezekwe - 2010, Example 10.8 p 324-326 and pseudop.py
#
f=2*p/(mu_g*z)  # definition of the function 2p/(mu_g*z)
H=(p[1:]-p[:-1])  # start of the integral computation
A=(p[1:]-p[:-1])/2*(f[1:]+f[:-1])  # (areas of the elementary slices)
pp=H[0]/2*f[0]  # first pseudo-pressure value
p_p=pp  # store the pseudo-pressure values in p_p
for a in A:
    pp=a+pp  # new pp = slice area + previous pp
    p_p=np.append(p_p,pp)  # store the pseudo-pressure values in p_p
print 'pseudo-pressure',p_p,'psi²/cp','(',len(p_p),')'
print ''
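#
# Added cross-check (an assumption, not part of the original workflow): the
# running sum above should match a vectorized cumulative sum of the elementary
# areas; 'p_p_check' is an illustrative name introduced here
#
p_p_check=H[0]/2*f[0]+np.concatenate(([0.0],np.cumsum(A)))
if np.max(abs(p_p-p_p_check))>1e-6*np.max(abs(p_p)):
    print 'Warning: the pseudo-pressure integration cross-check failed'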
np.savetxt('ppr_z_rhog_mug_FVF_pp.csv', (p_pr,z,rau_g,mu_g,B_g,p_p), delimiter=',')
###################################################################
###                                                             ###
###            Bourdet plot of the pseudo-pressure              ###
###                                                             ###
###################################################################
#
# Computing the pseudo-pressure derivative
#
pf, dp, dt=deriv(lt,te,p_p)
#
# Log-log plot
#
popp='Pseudo-pressure'
plot_bourd(te,pf,dt,dp,popp)
#
# Loop to fit the zero-slope line of the derivative
#
# on the log-log plot
#
hap='n'
while hap=='n':
    pf, dp, dt = deriv(lt,te,p_p)
    ### take new limits and recompute the derivative at the IARF
    x_IARF, reg_dp, ind_IARF_deb, ind_IARF_fin, y_dp_0 = der_auto(dt,dp)
    ### Plotty time!
    plot_bourd_s(te,pf,dt,dp,x_IARF,reg_dp,popp)
    hap=raw_input('Happy? (n = redraw the Bourdet plot)')
np.savetxt('dtpp_dpp.csv', (dt,dp), delimiter=',')
###################################################################
###                                                             ###
###            Horner plot of the pseudo-pressure               ###
###                                                             ###
###################################################################
#
# Computing the IARF slope
#
slope = penteH(p_p,tH,ind_IARF_deb,ind_IARF_fin)
#
# Computing pp0
#
pp0=slope*log10(tH[ind_IARF_fin]/1)+p_p[ind_IARF_fin]
#pp0=int(raw_input('Initial reservoir pseudo-pressure pp0? (psi2/cp)'))
print ''
print 'Horner slope', slope, '(psi²/cp)/log cycle'
print ''
print 'Initial reservoir pseudo-pressure', pp0, 'psi²/cp'
print ''
#
# Drawing the straight line corresponding to the IARF slope
#
tH_pente=1
tH_pente=np.append(tH_pente,tH)
p_pente=-slope*log10(tH_pente)+pp0
#
# Horner plot of the pseudo-pressure
#
hap='n'
while hap=='n':
    ylab='pseudo-pressure (psi2/cp)'
    plot_horner(tH,p_p,tH_pente,p_pente,ylab,popp)
    hap=raw_input('Happy? (n = redraw the Horner plot)')
#
# Horner plot with a graphically determined slope
#
mano=int(raw_input('Recompute the slope manually? 0 = No, 1 = Yes'))
if mano==1:
    hap='n'
    while hap=='n':
        pp0, slope, tH_pente, p_pente = penteM(tH)
        ylab='pseudo-pressure (psi2/cp)'
        plot_horner(tH,p_p,tH_pente,p_pente,ylab,popp)
        hap=raw_input('Happy? (n = redraw the Horner plot)')
np.savetxt('tHpp.csv', (tH), delimiter=',')
###################################################################
###                                                             ###
###        Extrapolating the initial pseudo-pressure            ###
###        to determine the initial reservoir                   ###
###        pressure by polynomial                               ###
###        regression                                           ###
###                                                             ###
###################################################################
#
# Degree-2 polynomial regression of p against pp
#
# polyfit returns the values a, b and c of the polynomial p = a*pp^2 + b*pp + c
#
cor_pp_p=np.polyfit(p_p,p,2)
#
# p_calc is the function p = f(pp) defined by polyfit
#
p_calc=np.poly1d(cor_pp_p)
#
# TODO: compute R to assess the quality of the correlation
#
# plot to visualize the correlation
#
#pylab.plot(p_p,p,'s',p_p,p_calc(p_p),'r-')
#pylab.xlabel('pseudo-pressure (psi2/cp)')
#pylab.ylabel('pressure (psi)')
#leg = plt.legend(('measured p','computed p'),'upper left', shadow=False)
#plt.grid(True)
#pylab.show()
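#
# Hedged sketch for the R mentioned above (an added check, not in the original
# script): coefficient of determination of the degree-2 fit; 'ss_res', 'ss_tot'
# and 'r2_pp_p' are illustrative names introduced here
#
ss_res=np.sum((p-p_calc(p_p))**2)
ss_tot=np.sum((p-np.mean(p))**2)
r2_pp_p=1-ss_res/ss_tot
print 'R2 of the p vs pp correlation', r2_pp_p
print ''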
p0=p_calc(pp0)
print 'Initial reservoir pressure p0=',p0, 'psi'
print ''
###################################################################
###                                                             ###
###           Computing the reservoir permeability              ###
###                                                             ###
###################################################################
z_inf=int(raw_input('Depth of the bottom of the investigated zone? (in feet)'))
print ''
z_sup=int(raw_input('Depth of the top of the investigated zone? (in feet)'))
print ''
h=z_inf-z_sup
print 'Thickness of the tested formation:',h,'feet'
print ''
#
# Computing k
#
m=slope
if var_q==1:
    k=(1637*qglast*(T_FU+460))/(m*h)  # k computed with the last rate (apparent-time case, variable q)
else:
    k=(1637*q*(T_FU+460))/(m*h)  # k computed with the constant rate q
print 'Formation permeability k=',k,'md'
#
# Horner plot with k and p0
#
plot_horner_f(tH,p_p,tH_pente,p_pente,ylab,popp,k,p0)
#
# Definition of an editable array of results of interest
#
pouits=raw_input("Well identifier?")
print ''
num=raw_input("Test number?")
print ''
typ=raw_input("ISI or FSI?")
print ''
Essai=pouits+num+typ
hkb=raw_input('Kelly Bushing elevation (same unit as the packer depth)')
print ''
results=array([Essai,'k (md)',k,'initial pressure (psi)',p0,'Temperature °C',T_USI,'Temperature °F',T_FU,'Bottom of the investigated zone (ft)',z_inf,'Top of the investigated zone (ft)',z_sup,'Thickness (ft)',h,'KB elevation (ft)',hkb,'Production time (min)',tp,"Horner production time (min)",tpH,'Gas gravity (-)',gamma_g,'Pseudo-reduced temperature (-)',T_pr,'Ordinate of the zero-slope line (psi2/cp)',y_dp_0,'Slope of the Horner line of pp (psi2/cp)',slope,'Initial pseudo-pressure (psi2/cp)',pp0])
#
# Saving the results to a *.csv file
#
results.tofile(file='resultats.csv', sep='\n')
#
# Opening a text editor to view/copy the results
#
os.system('leafpad resultats.csv')
#
# Fun with LaTeX
#
l1='\\documentclass[10pt]{article} \n\\usepackage[utf8x]{inputenc} \n\\usepackage[frenchb,english]{babel} \n\\usepackage[T1]{fontenc} \n\\usepackage{lmodern} \n\n\\begin{document} \n\n\\begin{table}'
l2='%s %s %s' % ('\\caption{Results of test',Essai,'}')
l3='\\begin{center}\n\\begin{tabular}{|c|c|c|}\n\\hline'
l4='\n\\bfseries Permeability & %s & md\\\\ \n' % k
l5='\n\\bfseries Initial pressure & %s & psi\\\\ \n' % p0
l6='\n\\bfseries Temperature & %s & ° C\\\\ \n' % T_USI
l7='\n\\bfseries Temperature & %s & ° F\\\\ \n' % T_FU
l8='\n\\bfseries Bottom of the investigated zone & %s & ft\\\\ \n' % z_inf
l9='\n\\bfseries Top of the investigated zone & %s & ft\\\\ \n' % z_sup
l10='\n\\bfseries Thickness & %s & ft\\\\ \n' % h
l11='\n\\bfseries KB elevation & %s & ft\\\\ \n' % hkb
l12='\n\\bfseries Production time & %s & min\\\\ \n' % tp
l13='\n\\bfseries Horner production time & %s & min\\\\ \n' % tpH
l14='\n\\bfseries Gas gravity & %s & -\\\\ \n' % gamma_g
l15='\n\\bfseries Pseudo-reduced temperature & %s & -\\\\ \n' % T_pr
l16='\n\\bfseries Ordinate of the zero-slope line & %s & psi$^2$/cp\\\\ \n' % y_dp_0
l17='\n\\bfseries Slope of the Horner line & %s & psi$^2$/cp/log cycle\\\\ \n' % slope
l18='\n\\bfseries Initial pseudo-pressure & %s & psi$^2$/cp\\\\ \n' % pp0
llast='\\hline\n\\end{tabular}\n\\end{center}\n\\end{table}\n\n\\end{document}'
crlatex=array([l1,l2,l3,l4,l5,l6,l7,l8,l9,l10,l11,l12,l13,l14,l15,l16,l17,l18,llast])
crlatex.tofile(file='woup.tex', sep='\n')
os.system('latex woup.tex')
os.system('xdvi woup.dvi')
#
# Closing message
#
print ' -'
print " - - - - - All's well that ends well. - - - - - "
print " -"
print " -"
print " -"
print ''
#>>> a='%s %d %s' % ('\\bfseries k &',k,'& md \\ \\')
#>>> results=array([a,b,c])
#>>> results.tofile(file='resultats.txt', sep='\n')
|
mit
|
dsullivan7/scikit-learn
|
sklearn/linear_model/setup.py
|
169
|
1567
|
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('linear_model', parent_package, top_path)
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        cblas_libs.append('m')
    config.add_extension('cd_fast', sources=['cd_fast.c'],
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []), **blas_info)
    config.add_extension('sgd_fast',
                         sources=['sgd_fast.c'],
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)
    # add other directories
    config.add_subpackage('tests')
    return config
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
lbishal/scikit-learn
|
sklearn/neighbors/approximate.py
|
30
|
22370
|
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
|
bsd-3-clause
|
andressotov/News-Categorization-MNB
|
News_Categorization_MNB.py
|
1
|
14641
|
# coding: utf-8
# # News Categorization using Multinomial Naive Bayes
# ## by Andrés Soto
# Once upon a time, while searching by internet, I discovered [this site](https://www.kaggle.com/uciml/news-aggregator-dataset), where I found this challenge:
# * Using the News Aggregator Data Set, can we predict the category (business, entertainment, etc.) of a news article given only its headline?
# So I decided to try to do it using the Multinomial Naive Bayes method.
# The News Aggregator Data Set comes from the UCI Machine Learning Repository.
# * Lichman, M. (2013). [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml). Irvine, CA: University of California, School of Information and Computer Science.
# This specific dataset can be found in the UCI ML Repository at [this URL](http://archive.ics.uci.edu/ml/datasets/News+Aggregator).
# This dataset contains headlines, URLs, and categories for 422,937 news stories collected by a web aggregator between March 10th, 2014 and August 10th, 2014. News categories in this dataset are labelled:
# Label | Category | News
# ------|------------------------|--------
# b | business | 115967
# t | science and technology | 108344
# e | entertainment | 152469
# m | health | 45639
# The Multinomial Naive Bayes method will be used to predict the category (business, entertainment, etc.) of a news article given only its headline. The paper is divided into four sections. The first section is dedicated to importing the data set and getting some preliminary information about it. The second section explains how to divide the data into two sets: the training set and the test set. Section 3 is about training and testing the classification algorithm and obtaining results. Results analysis constitutes the last section.
# ## Import data
# To import the data from the CSV file, we will use [Pandas library](http://pandas.pydata.org/) which also offers data structures and operations for manipulating data tables. Therefore, we need to import Pandas library.
# To embed plots inside the Notebook, we use the "%matplotlib inline" [magic command](http://ipython.readthedocs.io/en/stable/interactive/magics.html#).
# In[1]:
get_ipython().magic('matplotlib inline')
import pandas as pd
# Now, we have to initialize some variables. They will be used to collect the news titles, their categories, as well as a list of the different possible categories (without repetitions).
# In[2]:
titles = [] # list of news titles
categories = [] # list of news categories
labels = [] # list of different categories (without repetitions)
nlabels = 4 # number of different categories
lnews = [] # list of dictionaries with two fields: one for the news and
# the other for its category
# The code for this section will be organized in two functions: one which imports the data and the other which counts the news in each category, its percentage and plots it.
# In[3]:
def import_data():
global titles, labels, categories
# importing news aggregator data via Pandas (Python Data Analysis Library)
news = pd.read_csv("uci-news-aggregator.csv")
# function 'head' shows the first 5 items in a column (or
# the first 5 rows in the DataFrame)
print(news.head())
categories = news['CATEGORY']
titles = news['TITLE']
labels = sorted(list(set(categories)))
# Let's see how long it takes to import the data by %time [magic command](https://ipython.org/ipython-doc/3/interactive/magics.html).
# In[4]:
get_ipython().magic('time import_data()')
# The time to import the data was 3.54 seconds. Let's analyze how many news items we have in each category and their percentages. We will use the [class Counter](https://docs.python.org/3/library/collections.html#counter-objects) from the collections library, which keeps track of how many values a collection contains. Then we will tabulate the different categories and their percentages via a DataFrame.
# In[5]:
from collections import Counter
def count_data(labels,categories):
c = Counter(categories)
cont = dict(c)
# total number of news
tot = sum(list(cont.values()))
d = {
"category" : labels,
"news" : [cont[l] for l in labels],
"percent" : [cont[l]/tot for l in labels]
}
print(pd.DataFrame(d))
print("total \t",tot)
return cont
cont = count_data(labels,categories)
# Let's show a [pie plot](http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py) with the proportion of news by category.
# In[6]:
import pylab as pl # useful for drawing graphics
def categories_pie_plot(cont,tit):
global labels
sizes = [cont[l] for l in labels]
pl.pie(sizes, explode=(0, 0, 0, 0), labels=labels,
autopct='%1.1f%%', shadow=True, startangle=90)
pl.title(tit)
pl.show()
categories_pie_plot(cont,"Plotting categories")
# As we can see, the entertainment (e) category is the biggest one, which is more than three times bigger than health (m) category. In second place we have business (b) and technology (t), which are more than two times bigger than health category.
# ## Splitting the data
# Now we should split our data into two sets:
# 1. a training set (70%) used to discover potentially predictive relationships, and
# 2. a test set (30%) used to evaluate whether the discovered relationships hold and to assess the strength and utility of a predictive relationship.
# Before splitting it, the data should be first permuted. [Shuffle](http://scikit-learn.org/stable/modules/generated/sklearn.utils.shuffle.html) is a method included in scikit-learn library which allows to do random permutations of collections. Then data could be splitted into a pair of train and test sets.
# In[7]:
from sklearn.utils import shuffle # Shuffle arrays in a consistent way
X_train = []
y_train = []
X_test = []
y_test = []
def split_data():
global titles, categories
global X_train, y_train, X_test, y_test,labels
N = len(titles)
Ntrain = int(N * 0.7)
# Let's shuffle the data
titles, categories = shuffle(titles, categories, random_state=0)
X_train = titles[:Ntrain]
y_train = categories[:Ntrain]
X_test = titles[Ntrain:]
y_test = categories[Ntrain:]
# In[8]:
get_ipython().magic('time split_data()')
# Time required to split data is 1.28 seconds. Now let's analyze the proportion of news categories in the training set.
# In[9]:
cont2 = count_data(labels,y_train)
# The percentages are very close to the ones obtained for the whole data set.
# In[10]:
categories_pie_plot(cont2,"Categories % in training set")
# ## Train and test the classifier
# In order to train and test the classifier, the first step should be to tokenize and count the number of occurrences of each word that appears in the news titles, using the [CountVectorizer class](http://scikit-learn.org/stable/modules/feature_extraction.html#common-vectorizer-usage). Each term found is assigned a unique integer index.
# Then the counters will be transformed to a TF-IDF representation using [TfidfTransformer class](http://scikit-learn.org/stable/modules/feature_extraction.html#tfidf-term-weighting). The last step creates the [Multinomial Naive Bayes classifier](http://scikit-learn.org/stable/modules/naive_bayes.html#multinomial-naive-bayes).
# In order to make the training process easier, scikit-learn provides a [Pipeline class](http://scikit-learn.org/stable/modules/pipeline.html) that behaves like a compound classifier.
# The [metrics module](http://scikit-learn.org/stable/modules/classes.html) allows us to calculate score functions, performance metrics, pairwise metrics and distance computations. F1-score can be interpreted as a weighted average of the [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall): F1 = 2 * precision * recall / (precision + recall).
# In[11]:
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn import metrics
import numpy as np
import pprint
# lmats = [] # list of confussion matrix
nrows = nlabels
ncols = nlabels
# conf_mat_sum = np.zeros((nrows, ncols))
# f1_acum = [] # list of f1-score
def train_test():
global X_train, y_train, X_test, y_test, labels
#lmats, \
# conf_mat_sum, f1_acum, ncategories
text_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB()),
])
text_clf = text_clf.fit(X_train, y_train)
predicted = text_clf.predict(X_test)
return predicted
# In[12]:
get_ipython().magic('time predicted = train_test()')
# To compare the predicted labels to the corresponding set of true labels we will use the [method accuracy_score from scikit-learn](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html), which gives an accuracy over 0.92
# In[13]:
metrics.accuracy_score(y_test, predicted)
# To show the main classification metrics we will use the [classification_report method](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html) from scikit-learn.
# In[14]:
print(metrics.classification_report(y_test, predicted, target_names=labels))
# We can see that, although the metrics (precision, recall and f1-score) on average give us 0.92, the results for category e (entertainment) are even better.
# A [confusion matrix](http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html) allows us to detect whether a classification algorithm is confusing two or more classes when there is an unequal number of observations in each class, as in this case. An ideal classifier with 100% accuracy would produce a pure diagonal matrix, with all points predicted in their correct class. In case of class imbalance, normalizing the confusion matrix by class support size (number of elements in each class) can be interesting in order to have a visual interpretation of which class is being misclassified.
# In[15]:
mat = metrics.confusion_matrix(y_test, predicted,labels=labels)
cm = mat.astype('float') / mat.sum(axis=1)[:, np.newaxis]
cm
# Let's print a plot of the confusion matrix.
# In[16]:
import itertools
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, '{:5.2f}'.format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# In[17]:
plot_confusion_matrix(cm, labels, title='Confusion matrix')
# Confusion matrix columns represent the instances in a predicted class while rows represent the instances in an actual class. The diagonal elements represent the number of points for which the predicted label is equal to the true label, while off-diagonal elements are those that are mislabeled by the classifier. The higher the diagonal values of the confusion matrix the better, indicating many correct predictions.
# Now, let's see the relation between [f1-score](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html) and the percentage by category
# In[18]:
def resume_data(labels,y_train,f1s):
c = Counter(y_train)
cont = dict(c)
tot = sum(list(cont.values()))
nlabels = len(labels)
d = {
"category" : [labels[i] for i in range(nlabels)],
"percent" : [cont[labels[i]]/tot for i in range(nlabels)],
"f1-score" : [f1s[i] for i in range(nlabels)]
}
print(pd.DataFrame(d))
print("total \t",tot)
return cont
# In[19]:
f1s = metrics.f1_score(y_test, predicted, labels=labels, average=None)
cont3 = resume_data(labels,y_train,f1s)
# ## Results analysis
# To summarize, the results show good accuracy (0.9238) with a good average level for precision, recall and f1-score (0.92). Analyzing these results by category, results are even better for the entertainment category ('e'), with 0.96 for f1-score, 0.97 for recall and 0.95 for precision. I would like to highlight that the best precision corresponds to the health category ('m'), with 0.97, although its recall is only 0.85. The other categories show a more even behavior.
# Analyzing the confusion matrix, the highest rate of points predicted in their correct class corresponds to category 'e', with 0.9719. This category presents a misclassification index of 0.014 towards the technology category ('t') and lower indexes for the other categories.
# On the contrary, category 'm' presents the worst hit rate, with 0.846; it has misclassification indexes of 0.062 towards the business category ('b'), 0.0619 towards category 'e' and 0.03 towards category 't'.
# Analyzing the number of news items by category, category 'e' presents the highest percentage, 36%, with 45625 items. On the other hand, category 'm' presents the lowest percentage, 10.79%, with just 13709 items. Thus, category 'e' is more than three times bigger than category 'm'. Categories 'b' and 't' present similar numbers and percentages: 'b' has 34729 items with 27%, and 't' has 32663 items with 25%. Both categories, 'b' and 't', are more than two times bigger than category 'm'. According to this, better results seem to correspond to categories with higher percentages. In future experiments, I would try to confirm this hypothesis.
# In this experiment, we trained the classification algorithm with only one split of the data, so we only have one set of results. Although the training set and the test set were selected at random, this is just one sample of the possible results. In future experiments, I would try to assess the confidence of the experimental results.
|
mit
|
rseubert/scikit-learn
|
sklearn/manifold/isomap.py
|
36
|
7119
|
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500), 2319-2323, 2000.
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
|
bsd-3-clause
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/pandas/core/style.py
|
9
|
22789
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from functools import partial
from contextlib import contextmanager
from uuid import uuid1
import copy
from collections import defaultdict
try:
from jinja2 import Template
except ImportError:
msg = "pandas.Styler requires jinja2. "\
"Please install with `conda install Jinja2`\n"\
"or `pip install Jinja2`"
raise ImportError(msg)
import numpy as np
import pandas as pd
from pandas.compat import lzip
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
class Styler(object):
"""
Helps style a DataFrame or Series according to the
data with HTML and CSS.
.. versionadded:: 0.17.1
.. warning::
This is a new feature and is under active development.
We'll be adding features and possibly making breaking changes in future
releases.
Parameters
----------
data: Series or DataFrame
precision: int
precision to round floats to, defaults to pd.options.display.precision
table_styles: list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid: str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption: str, default None
caption to attach to the table
Attributes
----------
template: Jinja Template
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If used in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
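A minimal usage sketch (``color_negative_red`` and ``df`` are illustrative
names, not part of this module; it only relies on the documented contract
that style functions return CSS ``'attr: value'`` strings)::
    def color_negative_red(val):
        # red text for negative numbers, black otherwise
        return 'color: red' if val < 0 else 'color: black'
    df.style.applymap(color_negative_red)  # df is any pandas DataFrame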
See Also
--------
pandas.DataFrame.style
"""
template = Template("""
<style type="text/css" >
{% for s in table_styles %}
#T_{{uuid}} {{s.selector}} {
{% for p,val in s.props %}
{{p}}: {{val}};
{% endfor %}
}
{% endfor %}
{% for s in cellstyle %}
#T_{{uuid}}{{s.selector}} {
{% for p,val in s.props %}
{{p}}: {{val}};
{% endfor %}
}
{% endfor %}
</style>
<table id="T_{{uuid}}" {{ table_attributes }}>
{% if caption %}
<caption>{{caption}}</caption>
{% endif %}
<thead>
{% for r in head %}
<tr>
{% for c in r %}
<{{c.type}} class="{{c.class}}">{{c.value}}
{% endfor %}
</tr>
{% endfor %}
</thead>
<tbody>
{% for r in body %}
<tr>
{% for c in r %}
<{{c.type}} id="T_{{uuid}}{{c.id}}" class="{{c.class}}">
{% if c.value is number %}
{{c.value|round(precision)}}
{% else %}
{{c.value}}
{% endif %}
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
""")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
caption=None, table_attributes=None):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = pd.options.display.precision
self.precision = precision
self.table_attributes = table_attributes
def _repr_html_(self):
'''
Hooks into Jupyter notebook rich display system.
'''
return self.render()
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
idx_values = self.data.index.format(sparsify=False, adjoin=False,
names=False)
idx_values = lzip(*idx_values)
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
row_es = [{"type": "th", "value": BLANK_VALUE,
"class": " ".join([BLANK_CLASS])}] * n_rlvls
for c in range(len(clabels[0])):
cs = [COL_HEADING_CLASS, "level%s" % r, "col%s" % c]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
row_es.append({"type": "th", "value": clabels[r][c],
"class": " ".join(cs)})
head.append(row_es)
body = []
for r, idx in enumerate(self.data.index):
cs = [ROW_HEADING_CLASS, "level%s" % c, "row%s" % r]
cs.extend(cell_context.get(
"row_headings", {}).get(r, {}).get(c, []))
row_es = [{"type": "th",
"value": rlabels[r][c],
"class": " ".join(cs)}
for c in range(len(rlabels[r]))]
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row%s" % r, "col%s" % c]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
row_es.append({"type": "td", "value": self.data.iloc[r][c],
"class": " ".join(cs), "id": "_".join(cs[1:])})
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(['', ''])
cellstyle.append(
{'props': props,
'selector': "row%s_col%s" % (r, c)}
)
body.append(row_es)
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
precision=precision, table_styles=table_styles,
caption=caption, table_attributes=self.table_attributes)
def render(self):
"""
Render the built up styles to HTML
.. versionadded:: 0.17.1
Returns
-------
rendered: str
the rendered HTML
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
"""
self._compute()
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
# so we have the nested anys below
trimmed = [x for x in d['cellstyle'] if
any(any(y) for y in x['props'])]
d['cellstyle'] = trimmed
return self.template.render(**d)
def _update_ctx(self, attrs):
"""
update the state of the Styler. Collects a mapping
of {index_label: ['<property>: <value>']}
attrs: Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
styler = Styler(self.data, precision=self.precision,
caption=self.caption, uuid=self.uuid,
table_styles=self.table_styles)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
def __copy__(self):
"""
Deep copy by default.
"""
return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
return self._copy(deepcopy=True)
def clear(self):
'''
"Reset" the styler, removing any previously applied styles.
Returns None.
'''
self.ctx.clear()
self._todo = []
def _compute(self):
'''
Execute the style functions built up in `self._todo`.
        Relies on the convention that all style functions go through
        .apply or .applymap. These append styles to apply as tuples of
(application method, *args, **kwargs)
'''
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(self, func, axis=0, subset=None, **kwargs):
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
if axis is not None:
result = self.data.loc[subset].apply(func, axis=axis, **kwargs)
else:
            # axis=None: apply func to the entire DataFrame at once
result = func(self.data.loc[subset], **kwargs)
self._update_ctx(result)
return self
def apply(self, func, axis=0, subset=None, **kwargs):
"""
        Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
.. versionadded:: 0.17.1
Parameters
----------
func: function
axis: int, str or None
apply to each column (``axis=0`` or ``'index'``)
or to each row (``axis=1`` or ``'columns'``) or
to the entire DataFrame at once with ``axis=None``.
subset: IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs: dict
pass along to ``func``
Returns
-------
self
Notes
-----
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
"""
self._todo.append((lambda instance: getattr(instance, '_apply'),
(func, axis, subset),
kwargs))
return self
def _applymap(self, func, subset=None, **kwargs):
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
.. versionadded:: 0.17.1
Parameters
----------
func : function
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset),
kwargs))
return self
def set_precision(self, precision):
"""
Set the precision used to render.
.. versionadded:: 0.17.1
Parameters
----------
precision: int
Returns
-------
self
"""
self.precision = precision
return self
def set_table_attributes(self, attributes):
"""
Set the table attributes. These are the items
that show up in the opening ``<table>`` tag in addition
        to the automatic (by default) id.
.. versionadded:: 0.17.1
Parameters
----------
        attributes: str
Returns
-------
self
"""
self.table_attributes = attributes
return self
def export(self):
"""
        Export the styles applied to the current Styler.
Can be applied to a second style with ``Styler.use``.
.. versionadded:: 0.17.1
Returns
-------
styles: list
See Also
--------
Styler.use
"""
return self._todo
def use(self, styles):
"""
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
.. versionadded:: 0.17.1
Parameters
----------
styles: list
list of style functions
Returns
-------
self
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
def set_uuid(self, uuid):
"""
Set the uuid for a Styler.
.. versionadded:: 0.17.1
Parameters
----------
uuid: str
Returns
-------
self
"""
self.uuid = uuid
return self
def set_caption(self, caption):
"""
        Set the caption on a Styler
.. versionadded:: 0.17.1
Parameters
----------
caption: str
Returns
-------
self
"""
self.caption = caption
return self
def set_table_styles(self, table_styles):
"""
Set the table styles on a Styler
.. versionadded:: 0.17.1
Parameters
----------
table_styles: list
Returns
-------
self
"""
self.table_styles = table_styles
return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return 'background-color: %s' % null_color if pd.isnull(v) else ''
def highlight_null(self, null_color='red'):
"""
Shade the background ``null_color`` for missing values.
.. versionadded:: 0.17.1
Parameters
----------
null_color: str
Returns
-------
self
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
def background_gradient(self, cmap='PuBu', low=0, high=0,
axis=0, subset=None):
"""
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
.. versionadded:: 0.17.1
Parameters
----------
cmap: str or colormap
matplotlib colormap
low, high: float
compress the range by these values.
axis: int or str
            0 or 'index' for columnwise (the default), 1 or 'columns' for rowwise
subset: IndexSlice
a valid slice for ``data`` to limit the style application to
Returns
-------
self
Notes
-----
Tune ``low`` and ``high`` to keep the text legible by
not using the entire range of the color map. These extend
the range of the data by ``low * (x.max() - x.min())``
and ``high * (x.max() - x.min())`` before normalizing.
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset,
axis=axis, low=low, high=high)
return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0):
"""Color background in a range according to the data."""
with _mpl(Styler.background_gradient) as (plt, colors):
rng = s.max() - s.min()
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(s.min() - (rng * low),
s.max() + (rng * high))
# matplotlib modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
normed = norm(s.values)
c = [colors.rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]
return ['background-color: %s' % color for color in c]
def set_properties(self, subset=None, **kwargs):
"""
        Convenience method for setting one or more non-data dependent
        properties on each cell.
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs: dict
property: value pairs to be set for each cell
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
"""
values = ';'.join('{p}: {v}'.format(p=p, v=v) for p, v in
kwargs.items())
f = lambda x: values
return self.applymap(f, subset=subset)
@staticmethod
def _bar(s, color, width):
normed = width * (s - s.min()) / (s.max() - s.min())
attrs = 'width: 10em; height: 80%;'\
'background: linear-gradient(90deg,'\
'{c} {w}%, transparent 0%)'
return [attrs.format(c=color, w=x) for x in normed]
def bar(self, subset=None, axis=0, color='#d65f5f', width=100):
"""
        Color the background ``color`` proportional to the values in each column.
Excludes non-numeric data by default.
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
axis: int
color: str
width: float
            A number between 0 and 100. The largest value will cover ``width``
percent of the cell's width
Returns
-------
self
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis, color=color,
width=width)
return self
def highlight_max(self, subset=None, color='yellow', axis=0):
"""
Highlight the maximum by shading the background
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
        axis: int, str, or None; default 0
            0 or 'index' for columnwise (the default), 1 or 'columns'
            for rowwise, or ``None`` for tablewise
Returns
-------
self
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
"""
Highlight the minimum by shading the background
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
        axis: int, str, or None; default 0
            0 or 'index' for columnwise (the default), 1 or 'columns'
            for rowwise, or ``None`` for tablewise
Returns
-------
self
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
max_=True):
subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
self.apply(self._highlight_extrema, color=color, axis=axis,
subset=subset, max_=max_)
return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
'''
highlight the min or max in a Series or DataFrame
'''
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
        else:  # DataFrame from .apply(axis=None)
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
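# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the pandas source
# above). It assumes a pandas build (>= 0.17.1) that exposes ``DataFrame.style``,
# plus jinja2 and matplotlib for rendering and gradients; ``df`` is a
# hypothetical example frame.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    df = pd.DataFrame(np.random.randn(5, 3), columns=list("abc"))
    html = (df.style
            .background_gradient(cmap="PuBu")        # per-column gradient
            .highlight_max(color="yellow", axis=0)    # shade each column's max
            .bar(subset=["a"], color="#d65f5f")       # in-cell bar chart
            .set_caption("Styler usage sketch")
            .render())                                # rendered HTML string
    print(html[:200])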
|
gpl-2.0
|
NicovincX2/Python-3.5
|
Statistiques/Estimation (statistique)/Régression/Lasso (statistiques)/lasso_elasticnet.py
|
1
|
1707
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))  # Gaussian noise on every sample
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
os.system("pause")
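###############################################################################
# Added sketch (not part of the original example): rather than hard-coding
# alpha, it can be selected by cross-validation. Assumes scikit-learn's
# LassoCV and ElasticNetCV are available.
from sklearn.linear_model import LassoCV, ElasticNetCV
lasso_cv = LassoCV(cv=5).fit(X_train, y_train)
enet_cv = ElasticNetCV(l1_ratio=0.7, cv=5).fit(X_train, y_train)
print("LassoCV selected alpha: %f (r^2 = %f)"
      % (lasso_cv.alpha_, r2_score(y_test, lasso_cv.predict(X_test))))
print("ElasticNetCV selected alpha: %f (r^2 = %f)"
      % (enet_cv.alpha_, r2_score(y_test, enet_cv.predict(X_test))))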
|
gpl-3.0
|
LouisePaulDelvaux/openfisca-france-data
|
openfisca_france_data/input_data_builders/build_openfisca_indirect_taxation_survey_data/step_0_4_homogeneisation_revenus_menages.py
|
1
|
15961
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import pandas
from openfisca_survey_manager.survey_collections import SurveyCollection
from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_france_data.temporary import TemporaryStore
log = logging.getLogger(__name__)
temporary_store = TemporaryStore.create(file_name = "indirect_taxation_tmp")
def build_homogeneisation_revenus_menages(year = None):
"""Build menage consumption by categorie fiscale dataframe """
assert year is not None
# Load data
bdf_survey_collection = SurveyCollection.load(
collection = 'budget_des_familles', config_files_directory = config_files_directory)
survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))
    # **********************************************************************************************************************
    # ********************************* HOMOGENISATION OF HOUSEHOLD INCOME DATA ********************************************
    # ************************************ COMPUTATION OF A PROXY FOR HOUSEHOLD DISPOSABLE INCOME **************************
    # **********************************************************************************************************************
    #
    # ******************** HOMOGENISATION OF THE INCOME (RESSOURCES) BASES ***************************
    # The 1995 survey distinguishes the housing tax (taxe d'habitation) from property taxes (impots fonciers);
    # their relative shares are computed here so they can be applied to the 2000 and 2005 surveys.
if year == 1995:
menrev = survey.get_values(
table = "menrev",
variables = [
'revtot', 'ir', 'irbis', 'imphab', 'impfon', 'revaid', 'revsal', 'revind', 'revsec', 'revret',
'revcho', 'revfam', 'revlog', 'revinv', 'revrmi', 'revpat', 'mena', 'ponderr'
],
)
menage = survey.get_values(
table = "socioscm",
variables = ['exdep', 'exrev', 'mena']
)
        menage.set_index('mena', inplace = True)
        menrev.set_index('mena', inplace = True)
        menrev = menrev.merge(menage, left_index = True, right_index = True)
        # this step keeps only the observations whose quality and accuracy can be trusted
        # exdep = 1 if the household's expenditure data are properly filled in
        # exrev = 1 if the household's income data are properly filled in
menrev = menrev[(menrev.exdep == 1) & (menrev.exrev == 1)]
menrev['foncier_hab'] = menrev.imphab + menrev.impfon
menrev['part_IMPHAB'] = menrev.imphab / menrev.foncier_hab
menrev['part_IMPFON'] = menrev.impfon / menrev.foncier_hab
menrev['revsoc'] = (
menrev.revret + menrev.revcho + menrev.revfam + menrev.revlog + menrev.revinv + menrev.revrmi
)
for variable in ['revcho', 'revfam', 'revinv', 'revlog', 'revret', 'revrmi']:
del menrev[variable]
menrev['revact'] = menrev['revsal'] + menrev['revind'] + menrev['revsec']
menrev.rename(
columns = dict(
revpat = "revpat",
impfon = "impfon",
imphab = "imphab",
revaid = "somme_obl_recue",
),
inplace = True
)
menrev['impot_revenu'] = menrev['ir'] + menrev['irbis']
rev_disp = survey.get_values(
table = "menrev",
variables = ['revtot', 'revret', 'revcho', 'revfam', 'revlog', 'revinv', 'revrmi', 'imphab', 'impfon', 'revaid', 'revsal', 'revind', 'revsec', 'revpat', 'mena', 'ponderr', 'ir','irbis' ],
)
rev_disp.set_index('mena', inplace=True)
menage2 = survey.get_values(
table = "socioscm",
variables = ['exdep', 'exrev', 'mena']
)
menage2.set_index('mena', inplace = True)
rev_disp = menage2.merge(rev_disp, left_index = True, right_index = True)
rev_disp = rev_disp[(rev_disp.exrev == 1) & (rev_disp.exdep == 1)]
rev_disp['revsoc'] = rev_disp['revret'] + rev_disp['revcho'] + rev_disp['revfam'] + rev_disp['revlog'] + rev_disp['revinv'] + rev_disp['revrmi']
rev_disp['impot_revenu'] = rev_disp['ir'] + rev_disp['irbis']
rev_disp.rename(
columns = dict(
revaid = 'somme_obl_recue',
),
inplace = True
)
rev_disp.somme_obl_recue = rev_disp.somme_obl_recue.fillna(0)
rev_disp['revact'] = rev_disp['revsal'] + rev_disp['revind'] + rev_disp['revsec']
rev_disp['revtot'] = rev_disp['revact'] + rev_disp['revpat'] + rev_disp['revsoc'] + rev_disp['somme_obl_recue']
rev_disp['revact'] = rev_disp['revsal'] + rev_disp['revind'] + rev_disp['revsec']
rev_disp.rename(
columns = dict(
ponderr = "pondmen",
mena = "ident_men",
revind = "act_indpt",
revsal = "salaires",
revsec = "autres_rev",
),
inplace = True
)
rev_disp['autoverses'] = '0'
rev_disp['somme_libre_recue'] = '0'
rev_disp['autres_ress'] = '0'
#
        # Disposable income is computed from revtot by subtracting the housing tax (taxe d'habitation)
        # and the income tax, plus possibly the CSG and CRDS contributions.
        # The variable revtot is the sum of labour income, social benefits, capital income and received assistance.
#
rev_disp['rev_disponible'] = rev_disp.revtot - rev_disp.impot_revenu - rev_disp.imphab
loyers_imputes = temporary_store['depenses_bdf_{}'.format(year)]
loyers_imputes.rename(
columns = {"0411": "loyer_impute"},
inplace = True,
)
rev_dispbis = loyers_imputes.merge(rev_disp, left_index = True, right_index = True)
rev_disp['rev_disp_loyerimput'] = rev_disp['rev_disponible'] - rev_dispbis['loyer_impute']
for var in ['somme_obl_recue', 'act_indpt', 'revpat', 'salaires', 'autres_rev', 'rev_disponible', 'impfon', 'imphab', 'revsoc', 'revact', 'impot_revenu', 'revtot', 'rev_disp_loyerimput'] :
rev_disp[var] = rev_disp[var] / 6.55957
        # conversion to euros (1 euro = 6.55957 francs)
temporary_store["revenus_{}".format(year)] = rev_disp
elif year == 2000:
        # TODO: rather use the variables coming from the expenditure table (in temporary_store)
consomen = survey.get_values(
table = "consomen",
variables = ['c13141', 'c13111', 'c13121', 'c13131', 'pondmen', 'ident'],
)
rev_disp = consomen.sort(columns = ['ident'])
del consomen
menage = survey.get_values(
table = "menage",
variables = ['ident', 'revtot', 'revact', 'revsoc', 'revpat', 'rev70', 'rev71', 'revt_d', 'pondmen', 'rev10', 'rev11', 'rev20', 'rev21'],
).sort(columns = ['ident'])
revenus = menage.join(rev_disp, how = "outer", rsuffix = "rev_disp")
revenus.rename(
columns = dict(
c13111 = "impot_res_ppal",
c13141 = "impot_revenu",
c13121 = "impot_autres_res",
rev70 = "somme_obl_recue",
rev71 = "somme_libre_recue",
revt_d= "autres_ress",
ident = "ident_men",
rev10 = "act_indpt",
rev11 = "autoverses",
rev20 = "salaires",
rev21 = "autres_rev",
),
inplace = True
)
var_to_ints = ['pondmen','impot_autres_res','impot_res_ppal','pondmenrev_disp','c13131']
for var_to_int in var_to_ints:
revenus[var_to_int] = revenus[var_to_int].astype(int)
revenus['imphab'] = 0.65 * (revenus.impot_res_ppal + revenus.impot_autres_res)
revenus['impfon'] = 0.35 * (revenus.impot_res_ppal + revenus.impot_autres_res)
loyers_imputes = temporary_store["depenses_bdf_{}".format(year)]
variables = ["0421"]
loyers_imputes = loyers_imputes[variables]
loyers_imputes.rename(
columns = {"0421": "loyer_impute"},
inplace = True,
)
temporary_store["loyers_imputes_{}".format(year)] = loyers_imputes
loyers_imputes.index = loyers_imputes.index.astype('int')
revenus = revenus.set_index('ident_men')
revenus.index = revenus.index.astype('int')
revenus = revenus.merge(loyers_imputes, left_index = True, right_index = True)
revenus['rev_disponible'] = revenus.revtot - revenus.impot_revenu - revenus.imphab
revenus['rev_disponible'] = revenus['rev_disponible'] * (revenus['rev_disponible'] >= 0)
revenus['rev_disp_loyerimput'] = revenus.rev_disponible + revenus.loyer_impute
var_to_ints = ['loyer_impute']
for var_to_int in var_to_ints:
revenus[var_to_int] = revenus[var_to_int].astype(int)
temporary_store["revenus_{}".format(year)] = revenus
elif year == 2005:
c05d = survey.get_values(
table = "c05d",
variables = ['c13111', 'c13121', 'c13141', 'pondmen', 'ident_men'],
)
rev_disp = c05d.sort(columns = ['ident_men'])
del c05d
menage = survey.get_values(
table = "menage",
variables = ['ident_men', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700_d', 'rev701_d',
'rev999_d', 'rev100_d', 'rev101_d', 'rev200_d', 'rev201_d'],
).sort(columns = ['ident_men'])
rev_disp.set_index('ident_men', inplace = True)
menage.set_index('ident_men', inplace = True)
revenus = pandas.concat([menage, rev_disp], axis = 1)
revenus.rename(
columns = dict(
rev100_d = "act_indpt",
rev101_d = "autoverses",
rev200_d = "salaires",
rev201_d = "autres_rev",
rev700_d = "somme_obl_recue",
rev701_d = "somme_libre_recue",
rev999_d = "autres_ress",
c13111 = "impot_res_ppal",
c13141 = "impot_revenu",
c13121 = "impot_autres_res",
),
inplace = True
)
        # These weights (0.65 / 0.35) come from the 1995 BdF survey, which distinguishes the housing tax
        # (taxe d'habitation) from property taxes (impots fonciers). From BdF 1995 it was computed that the
        # housing tax represents on average 65% of local taxes and property taxes the remaining 35%.
        # These rates are applied to the 2000 and 2005 surveys.
        # gen imphab= 0.65*(impot_res_ppal + impot_autres_res)
        # gen impfon= 0.35*(impot_res_ppal + impot_autres_res)
        # drop impot_autres_res impot_res_ppal
revenus['imphab'] = 0.65 * (revenus.impot_res_ppal + revenus.impot_autres_res)
revenus['impfon'] = 0.35 * (revenus.impot_res_ppal + revenus.impot_autres_res)
del revenus['impot_autres_res']
del revenus['impot_res_ppal']
        # compute disposable income with and without imputed rent
loyers_imputes = temporary_store["depenses_bdf_{}".format(year)]
variables = ["0421"]
loyers_imputes = loyers_imputes[variables]
loyers_imputes.rename(
columns = {"0421": "loyer_impute"},
inplace = True,
)
temporary_store["loyers_imputes_{}".format(year)] = loyers_imputes
revenus = revenus.merge(loyers_imputes, left_index = True, right_index = True)
revenus['rev_disponible'] = revenus.revtot - revenus.impot_revenu - revenus.imphab
revenus['rev_disponible'] = revenus['rev_disponible'] * (revenus['rev_disponible'] >= 0)
revenus['rev_disp_loyerimput'] = revenus.rev_disponible + revenus.loyer_impute
temporary_store["revenus_{}".format(year)] = revenus
elif year == 2011:
try:
c05 = survey.get_values(
table = "C05",
variables = ['c13111', 'c13121', 'c13141', 'pondmen', 'ident_me'],
)
except:
c05 = survey.get_values(
table = "c05",
variables = ['c13111', 'c13121', 'c13141', 'pondmen', 'ident_me'],
)
rev_disp = c05.sort(columns = ['ident_me'])
del c05
try:
menage = survey.get_values(
table = "MENAGE",
variables = ['ident_me', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700', 'rev701', 'rev999', 'revindep', 'salaires'],
).sort(columns = ['ident_me'])
except:
menage = survey.get_values(
table = "menage",
variables = ['ident_me', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700', 'rev701', 'rev999', 'revindep', 'salaires'],
).sort(columns = ['ident_me'])
# variables = ['ident_me', 'revtot', 'revact', 'revsoc', 'revpat', 'rev700', 'rev701', 'rev999', 'revindep', 'rev101_d', 'salaires', 'rev201'],
rev_disp.set_index('ident_me', inplace = True)
menage.set_index('ident_me', inplace = True)
revenus = pandas.concat([menage, rev_disp], axis = 1)
revenus.rename(
columns = dict(
revindep = "act_indpt",
                #TODO: locate these commented-out income variables in BdF 2011
# rev101_d = "autoverses",
salaires = "salaires",
# rev201_d = "autres_rev",
rev700 = "somme_obl_recue",
rev701 = "somme_libre_recue",
rev999 = "autres_ress",
c13111 = "impot_res_ppal",
c13141 = "impot_revenu",
c13121 = "impot_autres_res",
),
inplace = True
)
revenus['imphab'] = 0.65 * (revenus.impot_res_ppal + revenus.impot_autres_res)
revenus['impfon'] = 0.35 * (revenus.impot_res_ppal + revenus.impot_autres_res)
del revenus['impot_autres_res']
del revenus['impot_res_ppal']
loyers_imputes = temporary_store["depenses_bdf_{}".format(year)]
variables = ["0421"]
loyers_imputes = loyers_imputes[variables]
loyers_imputes.rename(
columns = {"0421": "loyer_impute"},
inplace = True,
)
temporary_store["loyers_imputes_{}".format(year)] = loyers_imputes
revenus = revenus.merge(loyers_imputes, left_index = True, right_index = True)
revenus['rev_disponible'] = revenus.revtot - revenus.impot_revenu - revenus.imphab
revenus['rev_disponible'] = revenus['rev_disponible'] * (revenus['rev_disponible'] >= 0)
revenus['rev_disp_loyerimput'] = revenus.rev_disponible + revenus.loyer_impute
temporary_store["revenus_{}".format(year)] = revenus
if __name__ == '__main__':
import sys
import time
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
deb = time.clock()
year = 2000
build_homogeneisation_revenus_menages(year = year)
log.info("step_0_4_homogeneisation_revenus_menages duration is {}".format(time.clock() - deb))
|
agpl-3.0
|
Myasuka/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
300
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, then because the
number of samples is small we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
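##############################################################################
# Added check (a sketch, not part of the original example): report the alpha
# selected by GraphLassoCV's internal cross-validation and compare the
# off-diagonal sparsity of the estimated precision with the ground truth.
off_diag = ~np.eye(n_features, dtype=bool)
print("Selected alpha: %.4f" % model.alpha_)
print("Estimated precision: %d / %d off-diagonal entries are zero"
      % ((prec_ == 0)[off_diag].sum(), off_diag.sum()))
print("True precision:      %d / %d off-diagonal entries are zero"
      % ((prec == 0)[off_diag].sum(), off_diag.sum()))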
|
bsd-3-clause
|
wzbozon/statsmodels
|
statsmodels/sandbox/examples/example_gam.py
|
33
|
2343
|
'''original example for checking how far GAM works
Note: plt.show() at the end displays the graphs
'''
example = 2 # 1,2 or 3
import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs,))
f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2/4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2 # 0.1
y += z
d = np.array([x1,x2]).T
if example == 1:
print("normal")
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
print(m)
y_pred = m.results.predict(d)
plt.figure()
plt.plot(y, '.')
plt.plot(z, 'b-', label='true')
plt.plot(y_pred, 'r-', label='AdditiveModel')
plt.legend()
plt.title('gam.AdditiveModel')
import scipy.stats, time
if example == 2:
print("binomial")
f = family.Binomial()
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
if example == 3:
print("Poisson")
f = family.Poisson()
y = y/y.max() * 3
yp = f.link.inverse(y)
p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
|
bsd-3-clause
|
f3r/scikit-learn
|
sklearn/model_selection/tests/test_validation.py
|
20
|
27961
|
"""Test the validation module"""
from __future__ import division
import sys
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from test_split import MockClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) // 2
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y)
def test_cross_val_score_predict_labels():
# Check if ValueError (when labels is None) propagates to cross_val_score
# and cross_val_predict
# And also check if labels is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_label, _, pvalue_label = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, labels=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_check_is_permutation():
p = np.arange(100)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
|
bsd-3-clause
|
gotomypc/scikit-learn
|
examples/svm/plot_svm_margin.py
|
318
|
2328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
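# For a linear SVM the support vectors satisfy |w . x + b| = 1, so the
# geometric margin is 1 / ||w|| and the parallels are the decision line
# shifted by that distance.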
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
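# Evaluate the classifier on a dense grid so the two predicted regions can
# be drawn as background colours.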
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
teonlamont/mne-python
|
examples/decoding/plot_decoding_spoc_CMC.py
|
5
|
2818
|
"""
====================================
Continuous Target Decoding with SPoC
====================================
Source Power Comodulation (SPoC) [1]_ makes it possible to identify the
composition of orthogonal spatial filters that maximally correlate with a
continuous target.
SPoC can be seen as an extension of the CSP for continuous variables.
Here, SPoC is applied to decode the (continuous) fluctuation of an
electromyogram from MEG beta activity using data from the `Cortico-Muscular
Coherence example of FieldTrip
<http://www.fieldtriptoolbox.org/tutorial/coherence>`_.
References
----------
.. [1] Dahne, S., et al (2014). SPoC: a novel framework for relating the
amplitude of neuronal oscillations to behaviorally relevant parameters.
NeuroImage, 86, 111-122.
"""
# Author: Alexandre Barachant <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import Epochs
from mne.decoding import SPoC
from mne.datasets.fieldtrip_cmc import data_path
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_predict
# define parameters
fname = data_path() + '/SubjectCMC.ds'
raw = mne.io.read_raw_ctf(fname)
raw.crop(50., 250.).load_data() # crop for memory purposes
# Filter muscular activity to only keep high frequencies
emg = raw.copy().pick_channels(['EMGlft'])
emg.filter(20., None, fir_design='firwin')
# Filter MEG data to focus on the beta band (15-30 Hz)
raw.pick_types(meg=True, ref_meg=True, eeg=False, eog=False)
raw.filter(15., 30., fir_design='firwin')
# Build epochs as sliding windows over the continuous raw file
events = mne.make_fixed_length_events(raw, id=1, duration=.250)
# Epoch length is 1.5 seconds
meg_epochs = Epochs(raw, events, tmin=0., tmax=1.500, baseline=None,
detrend=1, decim=8)
emg_epochs = Epochs(emg, events, tmin=0., tmax=1.500, baseline=None)
# Prepare classification
X = meg_epochs.get_data()
y = emg_epochs.get_data().var(axis=2)[:, 0] # target is EMG power
# Classification pipeline with SPoC spatial filtering and Ridge Regression
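# SPoC learns spatial filters whose band power covaries with the target;
# with log=True the pipeline feeds log-variance features of the filtered MEG
# into the Ridge regressor.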
clf = make_pipeline(SPoC(n_components=2, log=True, reg='oas'), Ridge())
# Define a two fold cross-validation
cv = KFold(n_splits=2, shuffle=False)
# Run cross-validation
y_preds = cross_val_predict(clf, X, y, cv=cv)
# plot the True EMG power and the EMG power predicted from MEG data
fig, ax = plt.subplots(1, 1, figsize=[10, 4])
times = raw.times[meg_epochs.events[:, 0] - raw.first_samp]
ax.plot(times, y_preds, color='b', label='Predicted EMG')
ax.plot(times, y, color='r', label='True EMG')
ax.set_xlabel('Time (s)')
ax.set_ylabel('EMG Power')
ax.set_title('SPoC MEG Predictions')
plt.legend()
mne.viz.tight_layout()
plt.show()
|
bsd-3-clause
|
rvraghav93/scikit-learn
|
sklearn/linear_model/ridge.py
|
3
|
52817
|
"""
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
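# X1 exposes only matvec/rmatvec products, so the conjugate-gradient loop
# below never needs to form X^T X (or X X^T) densely.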
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
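# A.flat[::n_features + 1] walks the diagonal of A, so this adds the shared
# penalty to the diagonal of X^T X in place before the solve.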
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features], dtype=X.dtype)
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
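# Ridge solution in the SVD basis: coef = V diag(s / (s**2 + alpha)) U^T y;
# singular values at or below the cutoff contribute nothing.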
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
All solvers except 'svd' support both dense and sparse data. However, only
'sag' and 'saga' support sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag'.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iterations performed by the solver.
.. versionadded:: 0.17
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
if solver != 'auto':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
_dtype = [np.float64, np.float32]
# SAG needs X and y columns to be C-contiguous and np.float64
if solver in ['sag', 'saga']:
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=_dtype)
y = check_array(y, dtype=X.dtype, ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver not in ['sag', 'saga']:
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha, dtype=X.dtype).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver in ['sag', 'saga']:
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
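# When return_intercept is True, the extra trailing entry in 'coef' lets
# sag_solver fit the intercept together with the weights; it is split off
# from coef_ below.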
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i, 0,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init,
is_saga=solver == 'saga')
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
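# A minimal usage sketch of ridge_regression on a tiny dense problem. The
# helper below is purely illustrative (a hypothetical example, not part of
# the module's public API) and is never called at import time.
def _ridge_regression_usage_sketch():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(6, 3)
    y_demo = rng.randn(6)
    # With a single alpha the returned coef has shape (n_features,); note
    # that ridge_regression never fits an intercept.
    coef_demo = ridge_regression(X_demo, y_demo, alpha=1.0, solver='cholesky')
    return coef_demo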
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
if self.solver in ('sag', 'saga'):
_dtype = np.float64
else:
# all other solvers work at both float precision levels
_dtype = [np.float64, np.float32]
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=_dtype,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_offset
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
All solvers except 'svd' support both dense and sparse data. However,
only 'sag' and 'saga' support sparse input when `fit_intercept` is
True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float
Precision of the solution.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag'.
.. versionadded:: 0.17
*random_state* to support Stochastic Average Gradient.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See also
--------
RidgeClassifier, RidgeCV, :class:`sklearn.kernel_ridge.KernelRidge`
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its unbiased and more flexible version named SAGA. Both methods
use an iterative procedure, and are often faster than other solvers
when both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float
Precision of the solution.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag'.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y, centered_kernel=True):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
# the following emulates an additional constant regressor
# corresponding to fit_intercept=True
# but this is done only when the features have been centered
if centered_kernel:
K += np.ones_like(K)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
"""Helper function to avoid code duplication between self._errors and
self._values.
Notes
-----
We don't construct matrix G, instead compute action on y & diagonal.
"""
w = 1. / (v + alpha)
constant_column = np.var(Q, 0) < 1.e-12
# detect constant columns
w[constant_column] = 0 # cancel the regularization for the intercept
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y, centered_kernel=True):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
if centered_kernel:
X = np.hstack((X, np.ones((X.shape[0], 1))))
# to emulate the fit_intercept=True situation, add a column of ones
# Note that by centering, the other columns are orthogonal to that one
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
"""Helper function to avoid code duplication between self._errors_svd
and self._values_svd.
"""
constant_column = np.var(U, 0) < 1.e-12
# detect columns collinear with the column of ones
w = ((v + alpha) ** -1) - (alpha ** -1)
w[constant_column] = - (alpha ** -1)
# cancel the regularization for the intercept
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values. Will be cast to X's dtype if necessary
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
centered_kernel = not sparse.issparse(X) and self.fit_intercept
v, Q, QT_y = _pre_compute(X, y, centered_kernel)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(alpha, y, v, Q, QT_y)
else:
out, c = _values(alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values. Will be cast to X's dtype if necessary
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, cv=self.cv, scoring=self.scoring)
gs.fit(X, y, sample_weight=sample_weight)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used, else,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'eigen' if X is sparse or n_features > n_samples,
otherwise use 'svd'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of the kernel
matrix X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values. Will be cast to X's dtype if necessary
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
|
bsd-3-clause
|
olgabot/poshsplice
|
poshsplice/tests/test_hmmscan.py
|
1
|
2141
|
import os
import pandas as pd
import pandas.util.testing as pdt
import pytest
try:
# For Python 2
from StringIO import StringIO
except ImportError:
# For Python 3
from io import StringIO
@pytest.fixture
def example_hmmscan_out():
directory = os.path.dirname(__file__)
filename = '{0}/hmmscan_out.txt'.format(directory)
return filename
def test_read_hmmscan_out(example_hmmscan_out):
from poshsplice.hmmscan import read_hmmscan
test = read_hmmscan(example_hmmscan_out)
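# The expected frame below mirrors the hmmscan domain-table columns:
# per-sequence and per-domain scores plus target/query coordinates.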
true_s = """,target_name,target_accession,target_length,query_name,query_accession,query_length,sequence_e_value,sequence_score,sequence_bias,domain_number,domain_total,domain_conditional_e_value,domain_independent_e_value,domain_score,domain_bias,target_start,target_stop,query_start,query_stop,query_domain_envelope_start,query_domain_envelope_stop,mean_posterior_probability,target_description# noqa
0,Fox-1_C,PF12414.3,93,sp|O43251|RFOX2_HUMAN,-,390,3.2e-39,133.2,29.5,1,2,0.23,670.0,0.7,0.0,14,48,177,213,166,243,0.66,Calcitonin gene-related peptide regulator C terminal# noqa
1,Fox-1_C,PF12414.3,93,sp|O43251|RFOX2_HUMAN,-,390,3.2e-39,133.2,29.5,2,2,8.900000000000001e-42,2.6e-38,130.2,27.3,2,93,265,362,264,362,0.97,Calcitonin gene-related peptide regulator C terminal# noqa
2,RRM_1,PF00076.17,70,sp|O43251|RFOX2_HUMAN,-,390,8e-19,67.0,0.1,1,1,5.9e-22,1.7000000000000002e-18,65.9,0.1,2,70,124,191,123,191,0.97,"RNA recognition motif. (a.k.a. RRM, RBD, or RNP domain)"
3,RRM_6,PF14259.1,70,sp|O43251|RFOX2_HUMAN,-,390,2.4e-15,56.2,0.1,1,1,1.3999999999999999e-18,4.3e-15,55.4,0.1,1,70,123,191,123,191,0.95,"RNA recognition motif (a.k.a. RRM, RBD, or RNP domain)"
4,RRM_5,PF13893.1,56,sp|O43251|RFOX2_HUMAN,-,390,8.099999999999999e-11,41.6,0.1,1,1,5.9e-14,1.8000000000000002e-10,40.5,0.1,1,54,137,193,137,195,0.9,"RNA recognition motif. (a.k.a. RRM, RBD, or RNP domain)"
5,RRM_3,PF08777.6,105,sp|O43251|RFOX2_HUMAN,-,390,0.084,12.7,0.0,1,1,6.7e-05,0.2,11.5,0.0,17,79,136,202,127,206,0.83,RNA binding motif# noqa
"""
true = pd.read_csv(StringIO(true_s), index_col=0, comment='#')
pdt.assert_frame_equal(test, true)
|
bsd-3-clause
|
njsmith/pycam02ucs
|
pycam02ucs/cm/bezierbuilder.py
|
3
|
7029
|
# coding=utf8
# BézierBuilder
#
# Copyright (c) 2013, Juan Luis Cano Rodríguez <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""BézierBuilder, an interactive Bézier curve explorer.
Just run it with
$ python bezier_builder.py
"""
import numpy as np
from scipy.special import binom
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from .minimvc import Trigger
class BezierModel(object):
def __init__(self, xp, yp):
self._xp = list(xp)
self._yp = list(yp)
self.trigger = Trigger()
def get_control_points(self):
return list(self._xp), list(self._yp)
def get_bezier_points(self, num=200):
return self.get_bezier_points_at(np.linspace(0, 1, num))
def get_bezier_points_at(self, at, grid=256):
at = np.asarray(at)
# The Bezier curve is parameterized by a value t which ranges from 0
# to 1. However, there is a nonlinear relationship between this value
# and arclength. We want to parameterize by t', which measures
# normalized arclength. To do this, we have to calculate the function
# arclength(t), and then invert it.
t = np.linspace(0, 1, grid)
x, y = Bezier(list(zip(self._xp, self._yp)), t).T
x_deltas = np.diff(x)
y_deltas = np.diff(y)
arclength_deltas = np.empty(t.shape)
arclength_deltas[0] = 0
np.hypot(x_deltas, y_deltas, out=arclength_deltas[1:])
arclength = np.cumsum(arclength_deltas)
arclength /= arclength[-1]
# Now (t, arclength) is a LUT describing the t -> arclength mapping
# Invert it to get at -> t
at_t = np.interp(at, arclength, t)
# And finally look up the Bezier values at at_t
# (Might be quicker to np.interp against x and y, but eh, doesn't
# really matter.)
return Bezier(list(zip(self._xp, self._yp)), at_t).T
def add_point(self, i, new_x, new_y):
self._xp.insert(i, new_x)
self._yp.insert(i, new_y)
self.trigger.fire()
def remove_point(self, i):
del self._xp[i]
del self._yp[i]
self.trigger.fire()
def move_point(self, i, new_x, new_y):
self._xp[i] = new_x
self._yp[i] = new_y
self.trigger.fire()
def set_control_points(self, xp, yp):
self._xp = list(xp)
self._yp = list(yp)
self.trigger.fire()
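# A small illustrative sketch (hypothetical helper, not used by the classes
# below): three control points define a quadratic Bezier curve, sampled
# approximately uniformly in arclength.
def _bezier_model_sketch():
    model = BezierModel([0.0, 0.5, 1.0], [0.0, 1.0, 0.0])
    x, y = model.get_bezier_points(num=50)
    return x, y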
class BezierBuilder(object):
"""Bézier curve interactive builder.
"""
def __init__(self, ax, bezier_model):
self.ax = ax
self.bezier_model = bezier_model
self.canvas = self.ax.figure.canvas
xp, yp = self.bezier_model.get_control_points()
self.control_polygon = Line2D(xp, yp,
ls="--", c="#666666", marker="x",
mew=2, mec="#204a87")
self.ax.add_line(self.control_polygon)
x, y = self.bezier_model.get_bezier_points()
self.bezier_curve = Line2D(x, y)
self.ax.add_line(self.bezier_curve)
# Event handler for mouse clicking
self.canvas.mpl_connect('button_press_event', self.on_button_press)
self.canvas.mpl_connect('button_release_event', self.on_button_release)
self.canvas.mpl_connect('motion_notify_event', self.on_motion_notify)
self._index = None # Active vertex
self.bezier_model.trigger.add_callback(self._refresh)
self._refresh()
def __del__(self):
self.bezier_model.trigger.remove_callback(self._refresh)
def on_button_press(self, event):
# Ignore clicks outside axes
if event.inaxes != self.ax: return
res, ind = self.control_polygon.contains(event)
if res and event.key is None:
# Grabbing a point to drag
self._index = ind["ind"][0]
if res and event.key == "control":
# Control-click deletes
self.bezier_model.remove_point(ind["ind"][0])
if event.key == "shift":
# Adding a new point. Find the two closest points and insert it in
# between them.
total_squared_dists = []
xp, yp = self.bezier_model.get_control_points()
for i in range(len(xp) - 1):
dist = (event.xdata - xp[i]) ** 2
dist += (event.ydata - yp[i]) ** 2
dist += (event.xdata - xp[i + 1]) ** 2
dist += (event.ydata - yp[i + 1]) ** 2
total_squared_dists.append(dist)
best = np.argmin(total_squared_dists)
self.bezier_model.add_point(best + 1, event.xdata, event.ydata)
def on_button_release(self, event):
if event.button != 1: return
self._index = None
def on_motion_notify(self, event):
if event.inaxes != self.ax: return
if self._index is None: return
x, y = event.xdata, event.ydata
self.bezier_model.move_point(self._index, x, y)
def _refresh(self):
xp, yp = self.bezier_model.get_control_points()
self.control_polygon.set_data(xp, yp)
x, y = self.bezier_model.get_bezier_points()
self.bezier_curve.set_data(x, y)
self.canvas.draw()
def Bernstein(n, k):
"""Bernstein polynomial.
"""
coeff = binom(n, k)
def _bpoly(x):
return coeff * x ** k * (1 - x) ** (n - k)
return _bpoly
def Bezier(points, at):
"""Build Bézier curve from points.
"""
at = np.asarray(at)
at_flat = at.ravel()
N = len(points)
curve = np.zeros((at_flat.shape[0], 2))
for ii in range(N):
curve += np.outer(Bernstein(N - 1, ii)(at_flat), points[ii])
return curve.reshape(at.shape + (2,))
|
mit
|
cbertinato/pandas
|
pandas/tests/indexing/interval/test_interval_new.py
|
1
|
7246
|
import numpy as np
import pytest
from pandas import Interval, IntervalIndex, Series
import pandas.util.testing as tm
pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316")
class TestIntervalIndex:
def setup_method(self, method):
self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))
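# Fixture: values 0..4 indexed by the five consecutive intervals built from
# breaks 0, 1, ..., 5 (right-closed by default).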
def test_loc_with_interval(self):
# loc with single label / list of labels:
# - Intervals: only exact matches
# - scalars: those that contain it
s = self.s
expected = 0
result = s.loc[Interval(0, 1)]
assert result == expected
result = s[Interval(0, 1)]
assert result == expected
expected = s.iloc[3:5]
result = s.loc[[Interval(3, 4), Interval(4, 5)]]
tm.assert_series_equal(expected, result)
result = s[[Interval(3, 4), Interval(4, 5)]]
tm.assert_series_equal(expected, result)
# missing or not exact
with pytest.raises(KeyError):
s.loc[Interval(3, 5, closed='left')]
with pytest.raises(KeyError):
s[Interval(3, 5, closed='left')]
with pytest.raises(KeyError):
s[Interval(3, 5)]
with pytest.raises(KeyError):
s.loc[Interval(3, 5)]
with pytest.raises(KeyError):
s[Interval(3, 5)]
with pytest.raises(KeyError):
s.loc[Interval(-2, 0)]
with pytest.raises(KeyError):
s[Interval(-2, 0)]
with pytest.raises(KeyError):
s.loc[Interval(5, 6)]
with pytest.raises(KeyError):
s[Interval(5, 6)]
def test_loc_with_scalar(self):
# loc with single label / list of labels:
# - Intervals: only exact matches
# - scalars: those that contain it
s = self.s
assert s.loc[1] == 0
assert s.loc[1.5] == 1
assert s.loc[2] == 1
# TODO with __getitem__ same rules as loc, or positional ?
# assert s[1] == 0
# assert s[1.5] == 1
# assert s[2] == 1
expected = s.iloc[1:4]
tm.assert_series_equal(expected, s.loc[[1.5, 2.5, 3.5]])
tm.assert_series_equal(expected, s.loc[[2, 3, 4]])
tm.assert_series_equal(expected, s.loc[[1.5, 3, 4]])
expected = s.iloc[[1, 1, 2, 1]]
tm.assert_series_equal(expected, s.loc[[1.5, 2, 2.5, 1.5]])
expected = s.iloc[2:5]
tm.assert_series_equal(expected, s.loc[s >= 2])
def test_loc_with_slices(self):
# loc with slices:
# - Interval objects: only works with exact matches
# - scalars: only works for non-overlapping, monotonic intervals,
# and start/stop select location based on the interval that
# contains them:
# (slice_loc(start, stop) == (idx.get_loc(start), idx.get_loc(stop))
s = self.s
# slice of interval
expected = s.iloc[:3]
result = s.loc[Interval(0, 1):Interval(2, 3)]
tm.assert_series_equal(expected, result)
result = s[Interval(0, 1):Interval(2, 3)]
tm.assert_series_equal(expected, result)
expected = s.iloc[4:]
result = s.loc[Interval(3, 4):]
tm.assert_series_equal(expected, result)
result = s[Interval(3, 4):]
tm.assert_series_equal(expected, result)
with pytest.raises(KeyError):
s.loc[Interval(3, 6):]
with pytest.raises(KeyError):
s[Interval(3, 6):]
with pytest.raises(KeyError):
s.loc[Interval(3, 4, closed='left'):]
with pytest.raises(KeyError):
s[Interval(3, 4, closed='left'):]
# TODO with non-existing intervals ?
# s.loc[Interval(-1, 0):Interval(2, 3)]
# slice of scalar
expected = s.iloc[:3]
tm.assert_series_equal(expected, s.loc[:3])
tm.assert_series_equal(expected, s.loc[:2.5])
tm.assert_series_equal(expected, s.loc[0.1:2.5])
# TODO should this work? (-1 is not contained in any of the Intervals)
# tm.assert_series_equal(expected, s.loc[-1:3])
# TODO with __getitem__ same rules as loc, or positional ?
# tm.assert_series_equal(expected, s[:3])
# tm.assert_series_equal(expected, s[:2.5])
# tm.assert_series_equal(expected, s[0.1:2.5])
# slice of scalar with step != 1
with pytest.raises(NotImplementedError):
s[0:4:2]
def test_loc_with_overlap(self):
idx = IntervalIndex.from_tuples([(1, 5), (3, 7)])
s = Series(range(len(idx)), index=idx)
# scalar
expected = s
result = s.loc[4]
tm.assert_series_equal(expected, result)
result = s[4]
tm.assert_series_equal(expected, result)
result = s.loc[[4]]
tm.assert_series_equal(expected, result)
result = s[[4]]
tm.assert_series_equal(expected, result)
# interval
expected = 0
result = s.loc[Interval(1, 5)]
assert result == expected
result = s[Interval(1, 5)]
assert result == expected
expected = s
result = s.loc[[Interval(1, 5), Interval(3, 7)]]
tm.assert_series_equal(expected, result)
result = s[[Interval(1, 5), Interval(3, 7)]]
tm.assert_series_equal(expected, result)
with pytest.raises(KeyError):
s.loc[Interval(3, 5)]
with pytest.raises(KeyError):
s.loc[[Interval(3, 5)]]
with pytest.raises(KeyError):
s[Interval(3, 5)]
with pytest.raises(KeyError):
s[[Interval(3, 5)]]
# slices with interval (only exact matches)
expected = s
result = s.loc[Interval(1, 5):Interval(3, 7)]
tm.assert_series_equal(expected, result)
result = s[Interval(1, 5):Interval(3, 7)]
tm.assert_series_equal(expected, result)
with pytest.raises(KeyError):
s.loc[Interval(1, 6):Interval(3, 8)]
with pytest.raises(KeyError):
s[Interval(1, 6):Interval(3, 8)]
# slices with scalar raise for overlapping intervals
# TODO KeyError is the appropriate error?
with pytest.raises(KeyError):
s.loc[1:4]
def test_non_unique(self):
idx = IntervalIndex.from_tuples([(1, 3), (3, 7)])
s = Series(range(len(idx)), index=idx)
result = s.loc[Interval(1, 3)]
assert result == 0
result = s.loc[[Interval(1, 3)]]
expected = s.iloc[0:1]
tm.assert_series_equal(expected, result)
def test_non_unique_moar(self):
idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)])
s = Series(range(len(idx)), index=idx)
expected = s.iloc[[0, 1]]
result = s.loc[Interval(1, 3)]
tm.assert_series_equal(expected, result)
expected = s
result = s.loc[Interval(1, 3):]
tm.assert_series_equal(expected, result)
expected = s
result = s[Interval(1, 3):]
tm.assert_series_equal(expected, result)
expected = s.iloc[[0, 1]]
result = s[[Interval(1, 3)]]
tm.assert_series_equal(expected, result)
|
bsd-3-clause
|
alexzatsepin/omim
|
search/search_quality/scoring_model.py
|
1
|
9849
|
#!/usr/bin/env python3
from math import exp, log
from scipy.stats import pearsonr, t
from sklearn import svm
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.utils import resample
import argparse
import collections
import itertools
import numpy as np
import pandas as pd
import random
import sys
MAX_DISTANCE_METERS = 2e6
MAX_RANK = 255
MAX_POPULARITY = 255
RELEVANCES = {'Harmful': -3, 'Irrelevant': 0, 'Relevant': 1, 'Vital': 3}
NAME_SCORES = ['Zero', 'Substring', 'Prefix', 'Full Match']
SEARCH_TYPES = ['POI', 'Building', 'Street', 'Unclassified', 'Village', 'City', 'State', 'Country']
FEATURES = ['DistanceToPivot', 'Rank', 'Popularity', 'FalseCats', 'ErrorsMade', 'AllTokensUsed',
'CategorialRequest', 'HasName'] + NAME_SCORES + SEARCH_TYPES
BOOTSTRAP_ITERATIONS = 10000
def transform_name_score(value, categories_match):
if categories_match == 1:
return 'Zero'
else:
return value
def normalize_data(data):
transform_distance = lambda v: min(v, MAX_DISTANCE_METERS) / MAX_DISTANCE_METERS
data['DistanceToPivot'] = data['DistanceToPivot'].apply(transform_distance)
data['Rank'] = data['Rank'].apply(lambda v: v / MAX_RANK)
data['Popularity'] = data['Popularity'].apply(lambda v: v / MAX_POPULARITY)
data['Relevance'] = data['Relevance'].apply(lambda v: RELEVANCES[v])
cats = data['PureCats'].combine(data['FalseCats'], max)
# TODO (@y, @m): do forward/backward/subset selection of features
# instead of this merging. It would be great to conduct PCA on
# the features too.
data['NameScore'] = data['NameScore'].combine(cats, transform_name_score)
# Adds dummy variables to data for NAME_SCORES.
for ns in NAME_SCORES:
data[ns] = data['NameScore'].apply(lambda v: int(ns == v))
# Adds dummy variables to data for SEARCH_TYPES.
# We unify BUILDING with POI here, as we don't have enough
# training data to distinguish between them. Remove the following
# line once the model is changed or we have enough training data.
data['SearchType'] = data['SearchType'].apply(lambda v: v if v != 'Building' else 'POI')
for st in SEARCH_TYPES:
data[st] = data['SearchType'].apply(lambda v: int(st == v))
def compute_ndcg(relevances):
"""
Computes NDCG (Normalized Discounted Cumulative Gain) for a given
array of scores.
"""
dcg = sum(r / log(2 + i, 2) for i, r in enumerate(relevances))
dcg_norm = sum(r / log(2 + i, 2) for i, r in enumerate(sorted(relevances, reverse=True)))
return dcg / dcg_norm if dcg_norm != 0 else 0
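# Worked example: compute_ndcg([3, 1, 0]) == 1.0 because the relevances are
# already in ideal (descending) order, so DCG equals the ideal DCG, while
# compute_ndcg([0, 3]) == (3 / log(3, 2)) / 3 ~= 0.63 because the most relevant
# result sits in second place and its gain is discounted by log(3, 2).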
def compute_ndcgs_without_ws(data):
"""
Computes NDCG (Normalized Discounted Cumulative Gain) for a given
data. Returns an array of ndcg scores in the shape [num groups of
features].
"""
grouped = data.groupby(data['SampleId'], sort=False).groups
ndcgs = []
for id in grouped:
indices = grouped[id]
relevances = np.array(data.ix[indices]['Relevance'])
ndcgs.append(compute_ndcg(relevances))
return ndcgs
def compute_ndcgs_for_ws(data, ws):
"""
Computes NDCG (Normalized Discounted Cumulative Gain) for a given
data and an array of coeffs in a linear model. Returns an array of
ndcg scores in the shape [num groups of features].
"""
data_scores = np.array([np.dot(data.ix[i][FEATURES], ws) for i in data.index])
grouped = data.groupby(data['SampleId'], sort=False).groups
ndcgs = []
for id in grouped:
indices = grouped[id]
relevances = np.array(data.ix[indices]['Relevance'])
scores = data_scores[indices]
# Reorders relevances in accordance with decreasing scores.
relevances = relevances[scores.argsort()[::-1]]
ndcgs.append(compute_ndcg(relevances))
return ndcgs
def transform_data(data):
"""
By a given data computes x and y that can be used as an input to a
linear SVM.
"""
grouped = data.groupby(data['SampleId'], sort=False)
xs, ys = [], []
# k is used to create a balanced sample set for better linear
# separation.
k = 1
for _, group in grouped:
features, relevances = group[FEATURES], group['Relevance']
n, total = len(group), 0
for _, (i, j) in enumerate(itertools.combinations(range(n), 2)):
dr = relevances.iloc[j] - relevances.iloc[i]
y = np.sign(dr)
if y == 0:
continue
x = np.array(features.iloc[j]) - np.array(features.iloc[i])
# Need to multiply x by average drop in NDCG when i-th and
# j-th are exchanged.
x *= abs(dr * (1 / log(j + 2, 2) - 1 / log(i + 2, 2)))
# This is needed to prevent imbalance in class sizes.
if y != k:
x = np.negative(x)
y = -y
xs.append(x)
ys.append(y)
total += 1
k = -k
# Scales this group of features to equalize different search
# queries.
for i in range(-1, -total - 1, -1):
xs[i] = xs[i] / total
return xs, ys
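# In other words, transform_data performs a pairwise (RankSVM-style) reduction:
# for every pair of results (i, j) from the same query with different
# relevances, the feature difference becomes one training vector, labelled by
# the sign of the relevance difference and weighted by the approximate NDCG
# drop of swapping positions i and j, so a linear SVM on these pairs learns
# weights whose dot product with the features ranks results.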
def show_pearson_statistics(xs, ys, features):
"""
Shows info about Pearson coefficient between features and
relevancy.
"""
print('***** Correlation table *****')
print('H0 - feature is not correlated with relevancy')
print('H1 - feature is correlated with relevancy')
print()
cs, ncs = [], []
for i, f in enumerate(features):
zs = [x[i] for x in xs]
(c, p) = pearsonr(zs, ys)
correlated = p < 0.05
print('{}: pearson={:.3f}, P(H1)={}'.format(f, c, 1 - p))
if correlated:
cs.append(f)
else:
ncs.append(f)
print()
print('Correlated:', cs)
print('Non-correlated:', ncs)
def raw_output(features, ws):
"""
Prints feature-coeff pairs to the standard output.
"""
print('{:<20}{}'.format('Feature', 'Value'))
print()
for f, w in zip(features, ws):
print('{:<20}{:.5f}'.format(f, w))
def print_const(name, value):
print('double constexpr k{} = {:.7f};'.format(name, value))
def print_array(name, size, values):
print('double constexpr {}[{}] = {{'.format(name, size))
print(',\n'.join(' {:.7f} /* {} */'.format(w, f) for (f, w) in values))
print('};')
def cpp_output(features, ws):
"""
Prints feature-coeff pairs in the C++-compatible format.
"""
ns, st = [], []
for f, w in zip(features, ws):
if f in NAME_SCORES:
ns.append((f, w))
elif f in SEARCH_TYPES:
st.append((f, w))
else:
print_const(f, w)
print_array('kNameScore', 'NameScore::NAME_SCORE_COUNT', ns)
print_array('kType', 'Model::TYPE_COUNT', st)
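# Illustrative output of cpp_output (coefficient values here are made up):
#   double constexpr kDistanceToPivot = -1.0000000;
#   double constexpr kNameScore[NameScore::NAME_SCORE_COUNT] = {
#     0.0000000 /* Zero */,
#     0.2500000 /* Substring */,
#     0.5000000 /* Prefix */,
#     1.0000000 /* Full Match */
#   };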
def show_bootstrap_statistics(clf, X, y, features):
num_features = len(features)
coefs = []
for i in range(num_features):
coefs.append([])
for _ in range(BOOTSTRAP_ITERATIONS):
X_sample, y_sample = resample(X, y)
clf.fit(X_sample, y_sample)
for i, c in enumerate(get_normalized_coefs(clf)):
coefs[i].append(c)
poi_index = features.index('POI')
building_index = features.index('Building')
coefs[building_index] = coefs[poi_index]
intervals = []
print()
print('***** Bootstrap statistics *****')
print('{:<20}{:<20}{:<10}{:<10}'.format('Feature', '95% interval', 't-value', 'Pr(>|t|)'))
print()
for i, cs in enumerate(coefs):
values = np.array(cs)
lo = np.percentile(values, 2.5)
hi = np.percentile(values, 97.5)
interval = '({:.3f}, {:.3f})'.format(lo, hi)
tv = np.mean(values) / np.std(values)
pr = (1.0 - t.cdf(x=abs(tv), df=len(values))) * 0.5
stv = '{:.3f}'.format(tv)
spr = '{:.3f}'.format(pr)
print('{:<20}{:<20}{:<10}{:<10}'.format(features[i], interval, stv, spr))
def get_normalized_coefs(clf):
ws = clf.coef_[0]
max_w = max(abs(w) for w in ws)
return np.divide(ws, max_w)
def main(args):
data = pd.read_csv(sys.stdin)
normalize_data(data)
ndcgs = compute_ndcgs_without_ws(data)
print('Current NDCG: {:.3f}, std: {:.3f}'.format(np.mean(ndcgs), np.std(ndcgs)))
print()
xs, ys = transform_data(data)
clf = svm.LinearSVC(random_state=args.seed)
cv = KFold(n_splits=5, shuffle=True, random_state=args.seed)
# "C" stands for the regularizer constant.
grid = {'C': np.power(10.0, np.arange(-5, 6))}
gs = GridSearchCV(clf, grid, scoring='roc_auc', cv=cv)
gs.fit(xs, ys)
print('Best params: {}'.format(gs.best_params_))
ws = get_normalized_coefs(gs.best_estimator_)
# Following code restores coeffs for merged features.
ws[FEATURES.index('Building')] = ws[FEATURES.index('POI')]
ndcgs = compute_ndcgs_for_ws(data, ws)
print('NDCG mean: {:.3f}, std: {:.3f}'.format(np.mean(ndcgs), np.std(ndcgs)))
print('ROC AUC: {:.3f}'.format(gs.best_score_))
if args.pearson:
print()
show_pearson_statistics(xs, ys, FEATURES)
print()
print('***** Linear model weights *****')
if args.cpp:
cpp_output(FEATURES, ws)
else:
raw_output(FEATURES, ws)
if args.bootstrap:
show_bootstrap_statistics(clf, xs, ys, FEATURES)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--seed', help='random seed', type=int)
parser.add_argument('--pearson', help='show pearson statistics', action='store_true')
parser.add_argument('--cpp', help='generate output in the C++ format', action='store_true')
parser.add_argument('--bootstrap', help='show bootstrap confidence intervals', action='store_true')
args = parser.parse_args()
main(args)
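# Usage sketch (the input file name below is only an example): the script reads
# the assessed feature table from stdin and prints the fitted linear-model weights.
#   ./scoring_model.py --cpp --bootstrap < features.csv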
|
apache-2.0
|
heli522/scikit-learn
|
examples/applications/wikipedia_principal_eigenvector.py
|
233
|
7819
|
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
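# With n_components=5, U has shape (n_articles, 5), s holds the 5 leading
# singular values and V has shape (5, n_articles); U.T[0] and V[0] below are
# the leading left and right singular vectors.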
print("done in %0.3fs" % (time() - t0))
# print the names of the strongest Wikipedia-related components of the
# principal singular vector, which should be similar to the principal eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
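# `dangle` marks dangling pages (rows with no outgoing links); their score mass
# is redistributed uniformly over all n nodes at every iteration below.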
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
|
bsd-3-clause
|
aleonliao/webrtc-trunk
|
modules/remote_bitrate_estimator/test/plot_dynamics.py
|
11
|
4681
|
#!/usr/bin/env python
# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This script is used to plot simulation dynamics.
# Able to plot each flow separately. Other plot boxes can be added,
# currently one for Throughput, one for Latency and one for Packet Loss.
import matplotlib
import matplotlib.pyplot as plt
import numpy
import re
import sys
# Change this to True to save the figure to a file. Look below for details.
save_figure = False
class Variable(object):
def __init__(self, variable):
self._ID = variable[0]
self._xlabel = variable[1]
self._ylabel = variable[2]
self._subplot = variable[3]
self._y_max = variable[4]
self.samples = dict()
def getID(self):
return self._ID
def getXLabel(self):
return self._xlabel
def getYLabel(self):
return self._ylabel
def getSubplot(self):
return self._subplot
def getYMax(self):
return self._y_max
def getNumberOfFlows(self):
return len(self.samples)
def addSample(self, line):
groups = re.search(r'_(((\d)+((,(\d)+)*))_(\D+))#\d@(\S+)', line)
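# The regex assumes sample lines containing a token of the form
# _<flow id(s)>_<variable name>#<digit>@<algorithm>, followed by a
# '<time>\t<value>' pair of decimal numbers; var_name keeps the flow ids so
# that each flow gets its own series.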
# Each variable will be plotted in a separate box.
var_name = groups.group(1)
alg_name = groups.group(8)
alg_name = alg_name.replace('_', ' ')
if alg_name not in self.samples.keys():
self.samples[alg_name] = {}
if var_name not in self.samples[alg_name].keys():
self.samples[alg_name][var_name] = []
sample = re.search(r'(\d+\.\d+)\t([-]?\d+\.\d+)', line)
s = (sample.group(1), sample.group(2))
self.samples[alg_name][var_name].append(s)
def plotVar(v, ax, show_legend, show_x_label):
if show_x_label:
ax.set_xlabel(v.getXLabel(), fontsize='large')
ax.set_ylabel(v.getYLabel(), fontsize='large')
for alg in v.samples.keys():
for series in v.samples[alg].keys():
x = [sample[0] for sample in v.samples[alg][series]]
y = [sample[1] for sample in v.samples[alg][series]]
x = numpy.array(x)
y = numpy.array(y)
line = plt.plot(x, y, label=alg, linewidth=4.0)
colormap = {'Available0':'#AAAAAA',
'Available1':'#AAAAAA',
'GCC0':'#80D000',
'GCC1':'#008000',
'GCC2':'#00F000',
'GCC3':'#00B000',
'GCC4':'#70B020',
'NADA0':'#0000AA',
'NADA1':'#A0A0FF',
'NADA2':'#0000FF',
'NADA3':'#C0A0FF',
'NADA4':'#9060B0',}
flow_id = re.search(r'(\d+(,\d+)*)', series) # One or multiple ids.
key = alg + flow_id.group(1)
if key in colormap:
plt.setp(line, color=colormap[key])
elif alg == 'TCP':
plt.setp(line, color='#AAAAAA')
else:
plt.setp(line, color='#654321')
if alg.startswith('Available'):
plt.setp(line, linestyle='--')
plt.grid(True)
# x1, x2, y1, y2
_, x2, _, y2 = plt.axis()
if v.getYMax() >= 0:
y2 = v.getYMax()
plt.axis((0, x2, 0, y2))
if show_legend:
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.40),
shadow=True, fontsize='large', ncol=len(v.samples))
def main():
variables = [
('Throughput_kbps', "Time (s)", "Throughput (kbps)", 1, 4000),
('Delay_ms', "Time (s)", "One-way Delay (ms)", 2, 500),
('Packet_Loss', "Time (s)", "Packet Loss Ratio", 3, 1.0),
# ('Sending_Estimate_kbps', "Time (s)", "Sending Estimate (kbps)",
# 4, 4000),
]
var = []
# Create objects.
for variable in variables:
var.append(Variable(variable))
# Add samples to the objects.
for line in sys.stdin:
if line.startswith("[ RUN ]"):
test_name = re.search(r'\.(\w+)', line).group(1)
if line.startswith("PLOT"):
for v in var:
if v.getID() in line:
v.addSample(line)
matplotlib.rcParams.update({'font.size': 48/len(variables)})
# Plot variables.
fig = plt.figure()
# Offset and threshold on the same plot.
n = var[-1].getSubplot()
i = 0
for v in var:
ax = fig.add_subplot(n, 1, v.getSubplot())
plotVar(v, ax, i == 0, i == n - 1)
i += 1
if save_figure:
fig.savefig(test_name + ".png")
plt.show()
if __name__ == '__main__':
main()
|
bsd-3-clause
|